diff --git a/.gitmodules b/.gitmodules index 8523e88..96c02ec 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "projects/github.com/cue-sh/unity-example"] path = projects/github.com/cue-sh/unity-example url = https://github.com/cue-sh/unity-example +[submodule "projects/github.com/timberio/vector"] + path = projects/github.com/timberio/vector + url = https://github.com/timberio/vector diff --git a/overlays/projects/github.com/timberio/vector/eval.txt b/overlays/projects/github.com/timberio/vector/eval.txt new file mode 100644 index 0000000..ea43081 --- /dev/null +++ b/overlays/projects/github.com/timberio/vector/eval.txt @@ -0,0 +1,123183 @@ +# Verify that we can do a plain eval against all the .cue files that make up +# the vector project. Note: the command-line arguments below are the result +# of running: +# +# find docs -name '*.cue' +# +# in the root of the vector project (note this is the command used in +# scripts/cue.sh for discovering such files). For now, CUE does not have a way +# of handling a single package that is split across a directory hierarchy in +# this way, but per discussions with @binarylogic the pattern works incredibly +# well for the vector use case. So this is something we need to consider. + +cue eval docs/reference/releases/0.6.0.cue docs/reference/releases/0.9.0.cue docs/reference/releases/0.7.2.cue docs/reference/releases/0.9.2.cue docs/reference/releases/0.5.0.cue docs/reference/releases/0.9.1.cue docs/reference/releases/0.4.0.cue docs/reference/releases/0.8.0.cue docs/reference/releases/0.12.0.cue docs/reference/releases/0.11.0.cue docs/reference/releases/0.8.2.cue docs/reference/releases/0.8.1.cue docs/reference/releases/0.11.1.cue docs/reference/releases/0.7.0.cue docs/reference/releases/0.10.0.cue docs/reference/releases/0.7.1.cue docs/reference/services/clickhouse.cue docs/reference/services/pulsar.cue docs/reference/services/vector.cue docs/reference/services/azure_monitor_logs.cue docs/reference/services/gcp_pubsub.cue docs/reference/services/aws_sqs.cue docs/reference/services/datadog.cue docs/reference/services/nginx.cue docs/reference/services/host.cue docs/reference/services/aws_s3.cue docs/reference/services/files.cue docs/reference/services/datadog_metrics.cue docs/reference/services/nats.cue docs/reference/services/mongodb.cue docs/reference/services/aws_cloudwatch_logs.cue docs/reference/services/elasticsearch.cue docs/reference/services/prometheus.cue docs/reference/services/syslog.cue docs/reference/services/apache_http.cue docs/reference/services/humio.cue docs/reference/services/kubernetes.cue docs/reference/services/gcp_operations_logs.cue docs/reference/services/sematext.cue docs/reference/services/stdout.cue docs/reference/services/influxdb.cue docs/reference/services/papertrail.cue docs/reference/services/loki.cue docs/reference/services/aws_cloudwatch.cue docs/reference/services/http.cue docs/reference/services/datadog_logs.cue docs/reference/services/kafka.cue docs/reference/services/aws_kinesis_data_streams.cue docs/reference/services/aws_ecs.cue docs/reference/services/aws_kinesis_firehose.cue docs/reference/services/socket_recevier.cue docs/reference/services/journald.cue docs/reference/services/honeycomb.cue docs/reference/services/logdna.cue docs/reference/services/splunk.cue docs/reference/services/docker.cue docs/reference/services/aws_cloudwatch_metrics.cue docs/reference/services/heroku.cue docs/reference/services/prometheus_client.cue docs/reference/services/new_relic_logs.cue 
docs/reference/services/statsd_receiver.cue docs/reference/services/stdin.cue docs/reference/services/gcp_cloud_storage.cue docs/reference/services/socket_client.cue docs/reference/services/statsd.cue docs/reference/remap.cue docs/reference/urls.cue docs/reference/team.cue docs/reference/remap/expressions/comparison.cue docs/reference/remap/expressions/path.cue docs/reference/remap/expressions/arithmetic.cue docs/reference/remap/expressions/coalesce.cue docs/reference/remap/expressions/logical.cue docs/reference/remap/expressions/index.cue docs/reference/remap/expressions/variable.cue docs/reference/remap/expressions/function_call.cue docs/reference/remap/expressions/if.cue docs/reference/remap/expressions/assignment.cue docs/reference/remap/expressions/block.cue docs/reference/remap/concepts/expression.cue docs/reference/remap/concepts/literal.cue docs/reference/remap/concepts/program.cue docs/reference/remap/concepts/function.cue docs/reference/remap/concepts/event.cue docs/reference/remap/features.cue docs/reference/remap/errors.cue docs/reference/remap/functions/join.cue docs/reference/remap/functions/to_syslog_level.cue docs/reference/remap/functions/object.cue docs/reference/remap/functions/to_timestamp.cue docs/reference/remap/functions/length.cue docs/reference/remap/functions/ip_cidr_contains.cue docs/reference/remap/functions/ceil.cue docs/reference/remap/functions/parse_grok.cue docs/reference/remap/functions/ipv6_to_ipv4.cue docs/reference/remap/functions/downcase.cue docs/reference/remap/functions/to_syslog_facility.cue docs/reference/remap/functions/sha2.cue docs/reference/remap/functions/ends_with.cue docs/reference/remap/functions/parse_key_value.cue docs/reference/remap/functions/parse_aws_cloudwatch_log_subscription_message.cue docs/reference/remap/functions/to_float.cue docs/reference/remap/functions/uuid_v4.cue docs/reference/remap/functions/parse_regex_all.cue docs/reference/remap/functions/format_timestamp.cue docs/reference/remap/functions/parse_timestamp.cue docs/reference/remap/functions/strip_ansi_escape_codes.cue docs/reference/remap/functions/format_number.cue docs/reference/remap/functions/compact.cue docs/reference/remap/functions/parse_glog.cue docs/reference/remap/functions/exists.cue docs/reference/remap/functions/push.cue docs/reference/remap/functions/parse_duration.cue docs/reference/remap/functions/sha1.cue docs/reference/remap/functions/to_string.cue docs/reference/remap/functions/assert.cue docs/reference/remap/functions/now.cue docs/reference/remap/functions/parse_aws_vpc_flow_log.cue docs/reference/remap/functions/get_hostname.cue docs/reference/remap/functions/parse_tokens.cue docs/reference/remap/functions/includes.cue docs/reference/remap/functions/replace.cue docs/reference/remap/functions/truncate.cue docs/reference/remap/functions/flatten.cue docs/reference/remap/functions/to_int.cue docs/reference/remap/functions/parse_url.cue docs/reference/remap/functions/append.cue docs/reference/remap/functions/to_syslog_severity.cue docs/reference/remap/functions/del.cue docs/reference/remap/functions/log.cue docs/reference/remap/functions/array.cue docs/reference/remap/functions/float.cue docs/reference/remap/functions/decode_base64.cue docs/reference/remap/functions/merge.cue docs/reference/remap/functions/string.cue docs/reference/remap/functions/starts_with.cue docs/reference/remap/functions/int.cue docs/reference/remap/functions/parse_common_log.cue docs/reference/remap/functions/strip_whitespace.cue docs/reference/remap/functions/slice.cue 
docs/reference/remap/functions/round.cue docs/reference/remap/functions/split.cue docs/reference/remap/functions/contains.cue docs/reference/remap/functions/md5.cue docs/reference/remap/functions/encode_json.cue docs/reference/remap/functions/to_bool.cue docs/reference/remap/functions/floor.cue docs/reference/remap/functions/parse_syslog.cue docs/reference/remap/functions/get_env_var.cue docs/reference/remap/functions/ip_subnet.cue docs/reference/remap/functions/parse_aws_alb_log.cue docs/reference/remap/functions/to_unix_timestamp.cue docs/reference/remap/functions/encode_base64.cue docs/reference/remap/functions/timestamp.cue docs/reference/remap/functions/sha3.cue docs/reference/remap/functions/parse_regex.cue docs/reference/remap/functions/bool.cue docs/reference/remap/functions/upcase.cue docs/reference/remap/functions/parse_json.cue docs/reference/remap/functions/is_nullish.cue docs/reference/remap/functions/ip_to_ipv6.cue docs/reference/remap/functions/match.cue docs/reference/remap/literals.cue docs/reference/remap/features/compilation.cue docs/reference/remap/features/ergonomic_safety.cue docs/reference/remap/features/logs_and_metrics.cue docs/reference/remap/features/stateless.cue docs/reference/remap/features/type_safety.cue docs/reference/remap/features/memory_safety.cue docs/reference/remap/features/fail_safety.cue docs/reference/remap/features/quality_error_messages.cue docs/reference/remap/features/native.cue docs/reference/remap/errors/106_function_argument_arity_mismatch.cue docs/reference/remap/errors/100_unhandled_root_runtime_error.cue docs/reference/remap/errors/109_cannot_abort_function.cue docs/reference/remap/errors/107_required_function_argument_missing.cue docs/reference/remap/errors/104_unnecessary_error_assignment.cue docs/reference/remap/errors/110_invalid_argument_type.cue docs/reference/remap/errors/101_malformed_regex_literal.cue docs/reference/remap/errors/108_unknown_function_argument_keyword.cue docs/reference/remap/errors/103_unhandled_assignment_runtime_error.cue docs/reference/remap/errors/102_non_boolean_if_expression_predicate.cue docs/reference/remap/errors/105_undefined_function.cue docs/reference/remap/expressions.cue docs/reference/remap/literals/object.cue docs/reference/remap/literals/boolean.cue docs/reference/remap/literals/integer.cue docs/reference/remap/literals/array.cue docs/reference/remap/literals/float.cue docs/reference/remap/literals/string.cue docs/reference/remap/literals/null.cue docs/reference/remap/literals/timestamp.cue docs/reference/remap/literals/regular_expression.cue docs/reference/remap/syntax/whitespace.cue docs/reference/remap/syntax/comment.cue docs/reference/remap/syntax/keywords.cue docs/reference/remap/principles/safety.cue docs/reference/remap/principles/performance.cue docs/reference/remap/functions.cue docs/reference/remap/principles.cue docs/reference/remap/concepts.cue docs/reference/remap/syntax.cue docs/reference/components.cue docs/reference/data_model.cue docs/reference/process.cue docs/reference/cli.cue docs/reference/api.cue docs/reference/configuration.cue docs/reference/services.cue docs/reference/releases.cue docs/reference/installation.cue docs/reference/components/sinks/clickhouse.cue docs/reference/components/sinks/pulsar.cue docs/reference/components/sinks/humio_logs.cue docs/reference/components/sinks/vector.cue docs/reference/components/sinks/azure_monitor_logs.cue docs/reference/components/sinks/gcp_pubsub.cue docs/reference/components/sinks/console.cue 
docs/reference/components/sinks/influxdb_metrics.cue docs/reference/components/sinks/aws_sqs.cue docs/reference/components/sinks/datadog.cue docs/reference/components/sinks/socket.cue docs/reference/components/sinks/influxdb_logs.cue docs/reference/components/sinks/prometheus_remote_write.cue docs/reference/components/sinks/aws_s3.cue docs/reference/components/sinks/datadog_metrics.cue docs/reference/components/sinks/nats.cue docs/reference/components/sinks/aws_cloudwatch_logs.cue docs/reference/components/sinks/elasticsearch.cue docs/reference/components/sinks/humio.cue docs/reference/components/sinks/aws_kinesis_streams.cue docs/reference/components/sinks/splunk_hec.cue docs/reference/components/sinks/sematext_logs.cue docs/reference/components/sinks/prometheus_exporter.cue docs/reference/components/sinks/sematext.cue docs/reference/components/sinks/influxdb.cue docs/reference/components/sinks/papertrail.cue docs/reference/components/sinks/loki.cue docs/reference/components/sinks/aws_cloudwatch.cue docs/reference/components/sinks/humio_metrics.cue docs/reference/components/sinks/http.cue docs/reference/components/sinks/datadog_logs.cue docs/reference/components/sinks/kafka.cue docs/reference/components/sinks/gcp.cue docs/reference/components/sinks/aws_kinesis_firehose.cue docs/reference/components/sinks/sematext_metrics.cue docs/reference/components/sinks/honeycomb.cue docs/reference/components/sinks/gcp_stackdriver_logs.cue docs/reference/components/sinks/logdna.cue docs/reference/components/sinks/blackhole.cue docs/reference/components/sinks/aws_cloudwatch_metrics.cue docs/reference/components/sinks/file.cue docs/reference/components/sinks/new_relic_logs.cue docs/reference/components/sinks/gcp_cloud_storage.cue docs/reference/components/sinks/statsd.cue docs/reference/components/aws.cue docs/reference/components/transforms.cue docs/reference/components/transforms/remove_tags.cue docs/reference/components/transforms/metric_to_log.cue docs/reference/components/transforms/remap.cue docs/reference/components/transforms/ansi_stripper.cue docs/reference/components/transforms/wasm.cue docs/reference/components/transforms/add_fields.cue docs/reference/components/transforms/coercer.cue docs/reference/components/transforms/logfmt_parser.cue docs/reference/components/transforms/aws_ec2_metadata.cue docs/reference/components/transforms/filter.cue docs/reference/components/transforms/rename_fields.cue docs/reference/components/transforms/lua.cue docs/reference/components/transforms/aws_cloudwatch_logs_subscription_parser.cue docs/reference/components/transforms/reduce.cue docs/reference/components/transforms/tag_cardinality_limit.cue docs/reference/components/transforms/add_tags.cue docs/reference/components/transforms/remove_fields.cue docs/reference/components/transforms/log_to_metric.cue docs/reference/components/transforms/geoip.cue docs/reference/components/transforms/dedupe.cue docs/reference/components/transforms/json_parser.cue docs/reference/components/transforms/merge.cue docs/reference/components/transforms/route.cue docs/reference/components/transforms/regex_parser.cue docs/reference/components/transforms/concat.cue docs/reference/components/transforms/sample.cue docs/reference/components/transforms/split.cue docs/reference/components/transforms/key_value_parser.cue docs/reference/components/transforms/grok_parser.cue docs/reference/components/transforms/tokenizer.cue docs/reference/components/kafka.cue docs/reference/components/splunk.cue docs/reference/components/sources.cue 
docs/reference/components/sources/vector.cue docs/reference/components/sources/postgresql_metrics.cue docs/reference/components/sources/kubernetes_logs.cue docs/reference/components/sources/host_metrics.cue docs/reference/components/sources/generator.cue docs/reference/components/sources/socket.cue docs/reference/components/sources/prometheus_remote_write.cue docs/reference/components/sources/aws_s3.cue docs/reference/components/sources/mongodb_metrics.cue docs/reference/components/sources/syslog.cue docs/reference/components/sources/apache_metrics.cue docs/reference/components/sources/splunk_hec.cue docs/reference/components/sources/docker_logs.cue docs/reference/components/sources/nginx_metrics.cue docs/reference/components/sources/http.cue docs/reference/components/sources/kafka.cue docs/reference/components/sources/internal_logs.cue docs/reference/components/sources/aws_kinesis_firehose.cue docs/reference/components/sources/internal_metrics.cue docs/reference/components/sources/journald.cue docs/reference/components/sources/prometheus_scrape.cue docs/reference/components/sources/file.cue docs/reference/components/sources/stdin.cue docs/reference/components/sources/heroku_logs.cue docs/reference/components/sources/aws_ecs_metrics.cue docs/reference/components/sources/statsd.cue docs/reference/components/sinks.cue docs/reference/data_model/schema.cue docs/reference/installation/operating_systems/macos.cue docs/reference/installation/operating_systems/raspbian.cue docs/reference/installation/operating_systems/centos.cue docs/reference/installation/operating_systems/windows.cue docs/reference/installation/operating_systems/amazon_linux.cue docs/reference/installation/operating_systems/nixos.cue docs/reference/installation/operating_systems/ubuntu.cue docs/reference/installation/operating_systems/debian.cue docs/reference/installation/operating_systems/rhel.cue docs/reference/installation/package_managers.cue docs/reference/installation/platforms/kubernetes.cue docs/reference/installation/platforms/docker.cue docs/reference/installation/roles/aggregator.cue docs/reference/installation/roles/agent.cue docs/reference/installation/interfaces.cue docs/reference/installation/_interfaces/vector-installer.cue docs/reference/installation/_interfaces/dpkg.cue docs/reference/installation/_interfaces/docker-cli.cue docs/reference/installation/_interfaces/kubectl.cue docs/reference/installation/_interfaces/yum.cue docs/reference/installation/_interfaces/helm3.cue docs/reference/installation/_interfaces/nix.cue docs/reference/installation/_interfaces/apt.cue docs/reference/installation/_interfaces/role_implementations/file_agent.cue docs/reference/installation/_interfaces/role_implementations/file_sidecar.cue docs/reference/installation/_interfaces/role_implementations/journald_agent.cue docs/reference/installation/_interfaces/role_implementations/vector_aggregator.cue docs/reference/installation/_interfaces/homebrew.cue docs/reference/installation/_interfaces/rpm.cue docs/reference/installation/_interfaces/msi.cue docs/reference/installation/roles.cue docs/reference/installation/operating_systems.cue docs/reference/installation/platforms.cue docs/reference/installation/package_managers/helm.cue docs/reference/installation/package_managers/dpkg.cue docs/reference/installation/package_managers/yum.cue docs/reference/installation/package_managers/nix.cue docs/reference/installation/package_managers/apt.cue docs/reference/installation/package_managers/homebrew.cue 
docs/reference/installation/package_managers/rpm.cue docs/reference/installation/package_managers/msi.cue docs/reference/installation/downloads.cue docs/reference/installation/downloads/arm64-deb.cue docs/reference/installation/downloads/x86_64-unknown-linux-musl-tar-gz.cue docs/reference/installation/downloads/aarch64-rpm.cue docs/reference/installation/downloads/amd64-deb.cue docs/reference/installation/downloads/aarch64-unknown-linux-musl-tar-gz.cue docs/reference/installation/downloads/x86_64-pc-windows-msvc-zip.cue docs/reference/installation/downloads/armv7-unknown-linux-musleabihf-tar-gz.cue docs/reference/installation/downloads/armv7-unknown-linux-gnueabihf-tar-gz.cue docs/reference/installation/downloads/x86_64-rpm.cue docs/reference/installation/downloads/x86_64-apple-darwin-tar-gz.cue docs/reference/installation/downloads/armhf-deb.cue docs/reference/installation/downloads/x64-msi.cue docs/reference/installation/downloads/armv7-rpm.cue docs/guides/integrate.cue docs/reference.cue +cmp stdout $WORK/stdout.golden + +-- stdout.golden -- +import "strings" + +#Any: _ +#Arch: "ARM64" | "ARMv7" | "x86_64" +#CompressionAlgorithm: "none" | "gzip" | "lz4" | "snappy" | "zstd" +#CompressionLevel: "none" | "fast" | "default" | "best" | >=0 & <=9 +#Date: =~"^\\d{4}-\\d{2}-\\d{2}" +#DeliveryStatus: "at_least_once" | "best_effort" +#DeploymentRole: "aggregator" | "daemon" | "sidecar" +#DevelopmentStatus: "beta" | "stable" | "deprecated" +#EncodingCodec: "json" | "ndjson" | "text" +#Endpoint: { + description: string + responses: {} +} +#Endpoints: {} +#Enum: {} +#Event: {} | {} +#EventType: "log" | "metric" +#Fields: {} +#Interface: { + binary: { + name: string + } +} | { + ffi: {} +} | { + file_system: { + directory: string + } +} | { + socket: #InterfaceSocket +} | { + stdin: {} +} | { + stdout: {} +} +#InterfaceBinary: { + name: string +} +#InterfaceFileSystem: { + directory: string +} +#InterfaceSocket: { + api?: { + title: string + url: string + } + direction: "incoming" | "outgoing" + if direction == "outgoing" { + network_hops?: uint8 + permissions?: #Permissions + } + if direction == "incoming" { + port: uint16 + } + protocols: [#Protocol, ...#Protocol] + socket?: string + ssl: "disabled" | "required" | "optional" +} +#HowItWorks: {} +#LogEvent: {} +#Map: {} +#MetricEvent: { + kind: "incremental" | "absolute" + name: string + tags: {} + counter: { + value: float + } +} | { + kind: "incremental" | "absolute" + name: string + tags: {} + distribution: { + samples: [{ + value: float + rate: >=0 & int + }] + statistic: "histogram" | "summary" + } +} | { + kind: "incremental" | "absolute" + name: string + tags: {} + gauge: { + value: float + } +} | { + kind: "incremental" | "absolute" + name: string + tags: {} + histogram: { + buckets: [{ + upper_limit: float + count: >=0 & int + }] + count: >=0 & int + sum: float + } +} | { + kind: "incremental" | "absolute" + name: string + tags: {} + set: { + values: [string] + } +} | { + kind: "incremental" | "absolute" + name: string + tags: {} + summary: { + quantiles: [{ + upper_limit: float + value: float + }] + count: int + sum: float + } +} +#MetricEventCounter: { + value: float +} +#MetricEventDistribution: { + samples: [{ + value: float + rate: >=0 & int + }] + statistic: "histogram" | "summary" +} +#DistributionSample: { + value: float + rate: >=0 & int +} +#MetricEventGauge: { + value: float +} +#MetricEventHistogram: { + buckets: [{ + upper_limit: float + count: >=0 & int + }] + count: >=0 & int + sum: float +} +#HistogramBucket: { + 
upper_limit: float + count: >=0 & int +} +#MetricEventSet: { + values: [string] +} +#MetricEventSummary: { + quantiles: [{ + upper_limit: float + value: float + }] + count: int + sum: float +} +#SummaryQuantile: { + upper_limit: float + value: float +} +#MetricTags: {} +#MetricType: "counter" | "distribution" | "gauge" | "histogram" | "summary" +#Object: {} +#OperatingSystemFamily: "Linux" | "macOS" | "Windows" +#Permissions: { + unix: { + group: string + } +} +#Protocol: "http" | "tcp" | "udp" | "unix" +#Service: { + name: string + thing: string + url: string + versions: string | null +} +#SetupStep: { + title: string + description: string +} +#SetupSteps: [{ + title: string + description: string +}] +#Schema: {} +#SchemaField: { + category?: string + if type.object != _|_ // explicit error (_|_ literal) in source + { + category: strings.ToTitle(name) + } + description: string + groups?: [string, ...string] + name: string + relevant_when?: string + required: bool + warnings: [...string] + if !required { + common: bool + } + sort?: int8 + type: #Type & { + _args: { + required: required + } + } +} +#TargetTriples: { + "aarch64-unknown-linux-gnu": bool + "aarch64-unknown-linux-musl": bool + "armv7-unknown-linux-gnueabihf": bool + "armv7-unknown-linux-musleabihf": bool + "x86_64-apple-darwin": bool + "x86_64-pc-windows-msv": bool + "x86_64-unknown-linux-gnu": bool + "x86_64-unknown-linux-musl": bool +} +#Timestamp: =~"^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{6}Z" +let Args = _args +let Args_1 = _args +let Args_85 = _args +let Args_BA = _args +let Args_57C8 = _args +let Args_35B7E = _args +#Type: { + array: #TypeArray & { + _args: { + required: Args.required + } + } +} | { + "*": {} +} | { + bool: #TypeBool & { + _args: { + required: Args_1.required + } + } +} | { + float: #TypeFloat & { + _args: { + required: Args_85.required + } + } +} | { + object: { + examples: [] + options: {} + } +} | { + string: #TypeString & { + _args: { + required: Args_BA.required + } + } +} | { + timestamp: #TypeTimestamp & { + _args: { + required: Args_57C8.required + } + } +} | { + uint: #TypeUint & { + _args: { + required: Args_35B7E.required + } + } +} +let Args_457DB = _args +let Args_9E7542B = _args +let Args_EBA4C9E8 = _args +let Args_94E177227 = _args +let Args_3318F1204D = _args +#TypePrimitive: { + "*": {} +} | { + bool: #TypeBool & { + _args: { + required: Args_457DB.required + } + } +} | { + float: #TypeFloat & { + _args: { + required: Args_9E7542B.required + } + } +} | { + object: { + examples: [] + options: {} + } +} | { + string: #TypeString & { + _args: { + required: Args_EBA4C9E8.required + } + } +} | { + timestamp: #TypeTimestamp & { + _args: { + required: Args_94E177227.required + } + } +} | { + uint: #TypeUint & { + _args: { + required: Args_3318F1204D.required + } + } +} +#TypeArray: { + _args: { + required: bool + } + if !Args_5AA7B24C2B.required { + default: [...] 
| null + } + items: { + type: #TypePrimitive & { + _args: { + required: true + } + } + } +} +#TypeBool: { + _args: { + required: bool + } + if !Args_78A20F8F012B.required { + default: bool | null + } +} +#TypeFloat: { + _args: { + required: bool + } + if !Args_5673F3BDD18DD.required { + default: float | null + } + examples?: [float, ...float] +} +#TypeObject: { + examples: [] + options: {} +} +#TypeString: { + _args: { + required: bool + } + if !Args_130575EB3AC2BE.required { + default: string | null + } + enum?: #Enum + examples?: [...string] + if Args_130575EB3AC2BE.required && enum != _|_ // explicit error (_|_ literal) in source + { + examples: [string, ...string] | *[ for k, v in enum { + k + }] + } + syntax: "file_system_path" | "field_path" | "literal" | "template" | "regex" | "remap_boolean_expression" | "remap_program" | "strftime" +} +#TypeTimestamp: { + _args: { + required: bool + } + if !Args_5947D041B4FE94.required { + default: #Timestamp | null + } + examples: [_values.current_timestamp] +} +#TypeUint: { + _args: { + required: bool + } + if !Args_6D72D565E8172F.required { + default: int & >=0 | null + } + examples?: [int & >=0, ...int & >=0] + unit: #Unit | null +} +#Unit: "bytes" | "events" | "milliseconds" | "requests" | "seconds" +components: { + #Classes: { + _args: { + kind: string + } + commonly_used: bool + if Args_8A3CEFC9124204.kind == "source" || Args_8A3CEFC9124204.kind == "sink" { + delivery: #DeliveryStatus + } + if Args_8A3CEFC9124204.kind == "source" { + deployment_roles: [...#DeploymentRole] + } + development: #DevelopmentStatus + egress_method: #EgressMethod + if Args_8A3CEFC9124204.kind == "sink" { + service_providers: [string, ...string] | *[] + } + stateful: bool + } + #Component: { + kind: #ComponentKind + installation?: { + platform_name: string | null + } + configuration: #Schema + description?: =~"[.]$" + env_vars: #EnvVars + alias?: !="" + type: string + classes: #Classes & { + _args: { + kind: Kind + } + } + #ExampleConfig: { + title: string + context?: string + configuration: { + ... 
+ for k, v in configuration { + "\(k)": _ | *null + } + } + if Kind == "source" { + input: string + } + if Kind != "source" { + input: #Event | [#Event, ...#Event] + } + if Kind == "sink" { + output: string + } + if Kind != "sink" { + output: #Event | [#Event, ...#Event] | null + } + notes?: string + } + examples?: [#ExampleConfig, ...#ExampleConfig] + features: #Features & { + _args: { + egress_method: classes.egress_method + kind: Kind + } + } + how_it_works: #HowItWorks + if Kind != "source" { + input: #Input + } + if Kind != "sink" { + output: #Output + } + support: #Support & { + _args: { + kind: Kind + } + } + title: string + permissions?: { + iam: [#IAM, ...#IAM] + } + telemetry: { + metrics: #MetricOutput + } + } + #ComponentKind: "sink" | "source" | "transform" + #Components: {} + #EgressMethod: "batch" | "dynamic" | "expose" | "stream" + #EnvVars: {} + #Features: { + _args: { + egress_method: string + kind: string + } + if Args_D72850F3BFCC90.kind == "source" { + collect?: #FeaturesCollect + generate?: #FeaturesGenerate + multiline: #FeaturesMultiline + encoding?: #FeaturesEncoding + receive?: #FeaturesReceive + } + if Args_D72850F3BFCC90.kind == "transform" { + convert?: #FeaturesConvert + enrich?: #FeaturesEnrich + filter?: #FeaturesFilter + parse?: #FeaturesParse + program?: #FeaturesProgram + reduce?: #FeaturesReduce + route?: #FeaturesRoute + sanitize?: #FeaturesSanitize + shape?: #FeaturesShape + } + if Args_D72850F3BFCC90.kind == "sink" { + buffer: { + enabled: bool | string + } + healthcheck: { + enabled: bool + } + exposes?: #FeaturesExpose + send?: #FeaturesSend & { + _args: Args_69E503E786FD88 + } + } + descriptions: { + [Name=string]: string + } + } + #FeaturesCollect: { + checkpoint: { + enabled: bool + } + } + #FeaturesConvert: {} + #FeaturesEnrich: { + from: { + service: { + name: string + url: string + versions: string | null + } + } + } + #FeaturesExpose: { + tls: #FeaturesTLS & { + _args: { + mode: "accept" + } + } + for: { + service: { + name: string + thing: string + url: string + versions: string | null + } + } + } + #FeaturesFilter: {} + #FeaturesGenerate: {} + #FeaturesSendBufferBytes: { + enabled: bool + } + #FeaturesReceiveBufferBytes: { + enabled: bool + } + #FeaturesKeepalive: { + enabled: bool + } + #FeaturesMultiline: { + enabled: bool + } + #FeaturesEncoding: { + enabled: bool + } + #FeaturesParse: { + format: { + name: string + url: string | null + versions: string | null + } + } + #FeaturesProgram: { + runtime: { + name: string + url: string + version: string | null + } + } + #FeaturesReceive: { + tls: #FeaturesTLS & { + _args: { + mode: "accept" + } + } + } + #FeaturesReduce: {} + #FeaturesRoute: {} + #FeaturesSanitize: {} + #FeaturesShape: {} + #FeaturesSend: { + _args: { + egress_method: string + kind: string + } + if Args_53A734D86B4FD6.egress_method == "batch" || Args_53A734D86B4FD6.egress_method == "dynamic" { + batch: { + enabled: bool + common: bool + max_bytes: int & >=0 | null + max_events: int & >=0 | null + timeout_secs: uint16 | null + } + } + compression: { + enabled: bool + if enabled == true { + default: #CompressionAlgorithm + algorithms: [#CompressionAlgorithm, ...#CompressionAlgorithm] + levels: [#CompressionLevel, ...#CompressionLevel] + } + } + encoding: { + enabled: bool + if enabled { + codec: { + enabled: bool + if enabled { + default: #EncodingCodec | null + enum: [#EncodingCodec, ...#EncodingCodec] | null + } + } + } + } + send_buffer_bytes?: #FeaturesSendBufferBytes + keepalive?: #FeaturesKeepalive + request: { + enabled: 
bool + if enabled { + adaptive_concurrency: bool | *true + concurrency: uint8 | *5 + rate_limit_duration_secs: uint8 + rate_limit_num: uint16 + retry_initial_backoff_secs: uint8 + retry_max_duration_secs: uint8 + timeout_secs: uint8 + headers: bool + } + } + tls: #FeaturesTLS & { + _args: { + mode: "connect" + } + } + to?: { + service: #Service + interface?: #Interface + } + } + #FeaturesTLS: { + _args: { + mode: "accept" | "connect" + } + enabled: bool + if enabled { + can_enable: bool + can_verify_certificate: bool + if Args_A9491FE7A2999F.mode == "connect" { + can_verify_hostname: bool + } + enabled_default: bool + } + } + #Input: { + logs: bool + metrics: { + counter: false + distribution: false + gauge: false + histogram: false + set: false + summary: false + } | null + } + #LogOutput: {} + #MetricInput: { + counter: false + distribution: false + gauge: false + histogram: false + set: false + summary: false + } + #MetricOutput: {} + #Output: {} + #IAM: { + #Policy: { + #RequiredFor: "write" | "healthcheck" + _action: !="" + required_for: *["write"] | [#RequiredFor, ...#RequiredFor] + docs_url: !="" + required_when?: !="" + if platform == "aws" { + docs_url: "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + if platform == "gcp" { + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "\(_service).\(_action)" + } + } + platform: "aws" | "gcp" + policies: [#Policy] + } + #Runtime: { + name: string + url: string + version: string | null + } + #Support: { + requirements: [] | null + targets: { + "aarch64-unknown-linux-gnu": bool + "aarch64-unknown-linux-musl": bool + "armv7-unknown-linux-gnueabihf": bool + "armv7-unknown-linux-musleabihf": bool + "x86_64-apple-darwin": bool + "x86_64-pc-windows-msv": bool + "x86_64-unknown-linux-gnu": bool + "x86_64-unknown-linux-musl": bool + } + warnings: [] | null + notices: [] | null + } + sources: { + internal_logs: { + kind: "source" + title: "Internal Logs" + description: "The internal logs source exposes all log and trace messages emitted by the running Vector instance." + classes: { + commonly_used: true + delivery: "at_least_once" + deployment_roles: ["aggregator", "daemon", "sidecar"] + development: "beta" + egress_method: "stream" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Vector instance" + thing: "a Vector instance" + url: "https://vector.dev/docs/" + versions: ">= 0.11.0" + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Vector instance context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + notices: [] + requirements: [] + warnings: [] + } + installation: { + platform_name: null + } + configuration: { + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + internal_logs: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "internal_logs" + #ExampleConfig: { + title: string + configuration: { + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual log or trace message." + name: "line" + fields: { + message: { + description: "The textual message for this log or trace." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["Vector has started."] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the log or trace was generated." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + "*": { + description: "Each field from the original message is copied into the event." + name: "*" + required: true + warnings: [] + type: { + "*": {} + } + } + metadata: { + category: "Metadata" + description: "Metadata from the source log or trace event." + name: "metadata" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + kind: { + description: "What kind of call site caused this log or trace." + name: "kind" + required: true + warnings: [] + type: { + string: { + enum: { + event: "The call site is an event." + span: "The call site is a span." + } + examples: ["event", "span"] + syntax: "literal" + } + } + } + level: { + description: "The level of verbosity of the described span or event." + name: "level" + required: true + warnings: [] + type: { + string: { + enum: { + TRACE: "Designates very low priority, often extremely verbose, information." + DEBUG: "Designates lower priority information." + INFO: "Designates useful information." + WARN: "Designates hazardous situations." + ERROR: "Designates very serious errors." + } + examples: ["TRACE", "DEBUG", "INFO", "WARN", "ERROR"] + syntax: "literal" + } + } + } + module_path: { + description: "The path to the internal module where the span occurred" + name: "module_path" + required: true + warnings: [] + type: { + string: { + examples: ["vector::internal_events::heartbeat"] + syntax: "literal" + } + } + } + target: { + description: "Describes the part of the system where the span or event that this metadata describes occurred." + name: "target" + required: true + warnings: [] + type: { + string: { + examples: ["vector"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + } + how_it_works: { + limited_logs: { + #Subsection: { + title: string + body: string + } + name: "limited_logs" + title: "Logs are limited by startup options" + body: """ + At startup, the selection of log messages generated by + vector is set by a combination of the `$LOG` environment + variable and the `--quiet` and `--verbose` command-line + options. This internal logs source will only receive + logs that are generated by these options. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `internal_logs` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + events_out_total: { + description: "The total number of events emitted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + stdin: { + kind: "source" + title: "STDIN" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["sidecar"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "STDIN" + thing: "the STDIN stream" + url: "https://en.wikipedia.org/wiki/Standard_streams#Standard_input_(stdin)" + versions: null + } + interface: { + stdin: {} + } + } + tls: { + enabled: false + } + } + descriptions: { + receive_context: "Enriches data with useful STDIN context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + host_key: { + category: "Context" + common: false + description: "The key name added to each event representing the current host. This can also be globally set via the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + max_length: { + common: false + description: "The maximum bytes size of a message before rest of it will be discarded." + name: "max_length" + required: false + warnings: [] + type: { + uint: { + default: 102400 + unit: "bytes" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + stdin: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "stdin" + #ExampleConfig: { + title: string + configuration: { + host_key: null + max_length: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual event from STDIN." + name: "line" + fields: { + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + message: { + description: "The raw line, unparsed." 
+ name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["2019-02-13T19:48:34+00:00 [info] Started GET \"/\" for 127.0.0.1"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + examples: [{ + title: "STDIN line" + configuration: { + host_key: null + max_length: null + type: null + } + input: """ + ```text + 2019-02-13T19:48:34+00:00 [info] Started GET "/" for 127.0.0.1 + ``` + """ + output: { + log: { + timestamp: "2020-10-10T17:07:36.452332Z" + message: "2019-02-13T19:48:34+00:00 [info] Started GET \"/\" for 127.0.0.1" + host: "my-host.local" + } + } + }] + how_it_works: { + line_delimiters: { + #Subsection: { + title: string + body: string + } + name: "line_delimiters" + title: "Line Delimiters" + body: "Each line is read until a new line delimiter, the `0xA` byte, is found." + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `stdin` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + stdin_reads_failed_total: { + description: "The total number of errors reading from stdin." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "stdin_reads_failed_total" + } + } + } + } + file: { + kind: "source" + title: "File" + classes: { + commonly_used: true + delivery: "best_effort" + deployment_roles: ["daemon", "sidecar"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: true + } + from: { + service: { + name: "files" + thing: "files" + url: "https://en.wikipedia.org/wiki/File_system" + versions: null + } + interface: { + file_system: { + directory: "/var/log" + } + } + } + } + multiline: { + enabled: true + } + encoding: { + enabled: true + } + descriptions: { + collect_context: "Enriches data with useful files context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + multiline: "Merges multi-line logs into one event." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + exclude: { + common: false + description: "Array of file patterns to exclude. 
[Globbing](#globbing) is supported.*Takes precedence over the [`include` option](#include).*" + name: "exclude" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["/var/log/binary-file.log"] + syntax: "literal" + } + } + } + } + } + } + file_key: { + category: "Context" + common: false + description: "The key name added to each event with the full path of the file." + name: "file_key" + required: false + warnings: [] + type: { + string: { + default: "file" + examples: ["file"] + syntax: "literal" + } + } + } + fingerprint: { + common: false + category: "Fingerprint" + description: "Configuration for how the file source should identify files." + name: "fingerprint" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + strategy: { + common: false + description: "The strategy used to uniquely identify files. This is important for [checkpointing](#checkpointing) when file rotation is used." + name: "strategy" + required: false + warnings: [] + type: { + string: { + default: "checksum" + enum: { + checksum: "Read the first line of the file, skipping the first `ignored_header_bytes` bytes, to uniquely identify files via a checksum." + device_and_inode: "Uses the [device and inode](https://en.wikipedia.org/wiki/Inode) to unique identify files." + } + syntax: "literal" + } + } + } + ignored_header_bytes: { + common: false + description: "The number of bytes to skip ahead (or ignore) when generating a unique fingerprint. This is helpful if all files share a common header." + name: "ignored_header_bytes" + relevant_when: "strategy = \"checksum\"" + required: false + warnings: [] + type: { + uint: { + default: 0 + unit: "bytes" + } + } + } + } + } + } + } + glob_minimum_cooldown: { + common: false + description: "Delay between file discovery calls. This controls the interval at which Vector searches for files." + name: "glob_minimum_cooldown" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: "milliseconds" + } + } + } + host_key: { + category: "Context" + common: false + description: "The key name added to each event representing the current host. This can also be globally set via the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + ignore_not_found: { + common: false + description: "Ignore missing files when fingerprinting. This may be useful when used with source directories containing dangling symlinks." + name: "ignore_not_found" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + ignore_older: { + common: true + description: "Ignore files with a data modification date that does not exceed this age." + name: "ignore_older" + required: false + warnings: [] + type: { + uint: { + default: null + examples: [600] + unit: "seconds" + } + } + } + include: { + description: "Array of file patterns to include. [Globbing](#globbing) is supported." 
+ name: "include" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["/var/log/**/*.log"] + syntax: "literal" + } + } + } + } + } + } + line_delimiter: { + common: false + description: "String sequence used to separate one file line from another" + name: "line_delimiter" + required: false + warnings: [] + type: { + string: { + default: """ + + + """ + examples: ["\r\n"] + syntax: "literal" + } + } + } + max_line_bytes: { + common: false + description: "The maximum number of a bytes a line can contain before being discarded. This protects against malformed lines or tailing incorrect files." + name: "max_line_bytes" + required: false + warnings: [] + type: { + uint: { + default: 102400 + unit: "bytes" + } + } + } + max_read_bytes: { + category: "Reading" + common: false + description: "An approximate limit on the amount of data read from a single file at a given time." + name: "max_read_bytes" + required: false + warnings: [] + type: { + uint: { + default: null + examples: [2048] + unit: "bytes" + } + } + } + oldest_first: { + category: "Reading" + common: false + description: "Instead of balancing read capacity fairly across all watched files, prioritize draining the oldest files before moving on to read data from younger files." + name: "oldest_first" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + remove_after: { + common: false + description: "Timeout from reaching `eof` after which file will be removed from filesystem, unless new data is written in the meantime. If not specified, files will not be removed." + name: "remove_after" + required: false + warnings: ["Vector's process must have permission to delete files."] + type: { + uint: { + default: null + examples: [0, 5, 60] + unit: "seconds" + } + } + } + read_from: { + common: true + description: "In the absence of a checkpoint, this setting tells Vector where to start reading files that are present at startup." + name: "read_from" + required: false + warnings: [] + type: { + string: { + syntax: "literal" + default: "beginning" + enum: { + beginning: "Read from the beginning of the file." + end: "Start reading from the current end of the file." + } + } + } + } + multiline: { + common: false + category: "Multiline" + description: "Multiline parsing configuration. If not specified, multiline parsing is disabled." + name: "multiline" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + condition_pattern: { + description: "Condition regex pattern to look for. Exact behavior is configured via `mode`." + name: "condition_pattern" + required: true + warnings: [] + sort: 3 + type: { + string: { + examples: ["^[\\s]+", "\\\\$", "^(INFO|ERROR) ", ";$"] + syntax: "regex" + } + } + } + mode: { + description: "Mode of operation, specifies how the `condition_pattern` is interpreted." + name: "mode" + required: true + warnings: [] + sort: 2 + type: { + string: { + enum: { + continue_through: "All consecutive lines matching this pattern are included in the group. The first line (the line that matched the start pattern) does not need to match the `ContinueThrough` pattern. This is useful in cases such as a Java stack trace, where some indicator in the line (such as leading whitespace) indicates that it is an extension of the preceding line." + continue_past: "All consecutive lines matching this pattern, plus one additional line, are included in the group. 
This is useful in cases where a log message ends with a continuation marker, such as a backslash, indicating that the following line is part of the same message." + halt_before: "All consecutive lines not matching this pattern are included in the group. This is useful where a log line contains a marker indicating that it begins a new message." + halt_with: "All consecutive lines, up to and including the first line matching this pattern, are included in the group. This is useful where a log line ends with a termination marker, such as a semicolon." + } + examples: ["continue_through", "continue_past", "halt_before", "halt_with"] + syntax: "literal" + } + } + } + start_pattern: { + description: "Start regex pattern to look for as a beginning of the message." + name: "start_pattern" + required: true + warnings: [] + sort: 1 + type: { + string: { + examples: ["^[^\\s]", "\\\\$", "^(INFO|ERROR) ", "[^;]$"] + syntax: "regex" + } + } + } + timeout_ms: { + description: "The maximum time to wait for the continuation. Once this timeout is reached, the buffered message is guaranteed to be flushed, even if incomplete." + name: "timeout_ms" + required: true + warnings: [] + sort: 4 + type: { + uint: { + examples: [1_000, 600_000] + unit: "milliseconds" + } + } + } + } + } + } + } + data_dir: { + common: false + description: "The directory used to persist file checkpoint positions. By default, the global `data_dir` option is used. Please make sure the Vector project has write permissions to this dir." + name: "data_dir" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/var/lib/vector"] + syntax: "file_system_path" + } + } + } + encoding: { + common: false + category: "Encoding" + description: "Configures the encoding specific source behavior." + name: "encoding" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + charset: { + common: false + description: "Encoding of the source messages. Takes one of the encoding [label strings](https://encoding.spec.whatwg.org/#concept-encoding-get) defined as part of the [Encoding Standard](https://encoding.spec.whatwg.org/). When set, the messages are transcoded from the specified encoding to UTF-8, which is the encoding vector assumes internally for string-like data. Enable this transcoding operation if you need your data to be in UTF-8 for further processing. At the time of transcoding, any malformed sequences (that can't be mapped to UTF-8) will be replaced with [replacement character](https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character) and warnings will be logged." + name: "charset" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["utf-16le", "utf-16be"] + syntax: "literal" + } + } + } + } + } + } + } + ignore_checkpoints: { + common: false + description: "This causes Vector to ignore existing checkpoints when determining where to start reading a file. Checkpoints are still written normally." + name: "ignore_checkpoints" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + file: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "file" + #ExampleConfig: { + title: string + configuration: { + exclude: null + file_key: null + fingerprint: null + glob_minimum_cooldown: null + host_key: null + ignore_not_found: null + ignore_older: null + include: null + line_delimiter: null + max_line_bytes: null + max_read_bytes: null + oldest_first: null + remove_after: null + read_from: null + ignore_checkpoints: null + type: null + multiline: null + data_dir: null + encoding: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual line from a file. Lines can be merged using the `multiline` options." + name: "line" + fields: { + file: { + description: "The absolute path of originating file." + name: "file" + required: true + warnings: [] + type: { + string: { + examples: ["/var/log/apache/access.log"] + syntax: "literal" + } + } + } + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + message: { + description: "The raw line from the file." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + examples: [{ + title: "Apache Access Log" + configuration: { + include: ["/var/log/**/*.log"] + exclude: null + file_key: null + fingerprint: null + glob_minimum_cooldown: null + host_key: null + ignore_not_found: null + ignore_older: null + line_delimiter: null + max_line_bytes: null + max_read_bytes: null + oldest_first: null + remove_after: null + read_from: null + ignore_checkpoints: null + type: null + multiline: null + data_dir: null + encoding: null + } + input: """ + ```text filename="/var/log/apache/access.log" + 53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] "GET /disintermediate HTTP/2.0" 401 20308 + ``` + """ + output: { + log: { + file: "/var/log/apache/access.log" + host: "my-host.local" + message: "53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308" + timestamp: "2020-10-10T17:07:36.452332Z" + } + } + }] + how_it_works: { + autodiscover: { + #Subsection: { + title: string + body: string + } + name: "autodiscover" + title: "Autodiscovery" + body: """ + Vector will continually look for new files matching any of your + include patterns. The frequency is controlled via the + `glob_minimum_cooldown` option. If a new file is added that matches + any of the supplied patterns, Vector will begin tailing it. Vector + maintains a unique list of files and will not tail a file more than + once, even if it matches multiple patterns. You can read more about + how we identify files in the Identification section. + """ + } + compressed_files: { + #Subsection: { + title: string + body: string + } + name: "compressed_files" + title: "Compressed Files" + body: """ + Vector will transparently detect files which have been compressed + using Gzip and decompress them for reading. 
This detection process
+ looks for the unique sequence of bytes in the Gzip header and does
+ not rely on the compressed files adhering to any kind of naming
+ convention.
+
+ One caveat with reading compressed files is that Vector is not able
+ to efficiently seek into them. Rather than implement a
+ potentially-expensive full scan as a seek mechanism, Vector
+ currently will not attempt to make further reads from a file for
+ which it has already stored a checkpoint in a previous run. For
+ this reason, users should take care to allow Vector to fully
+ process any compressed files before shutting the process down or moving the
+ files to another location on disk.
+ """
+ }
+ file_deletion: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "file_deletion"
+ title: "File Deletion"
+ body: """
+ When a watched file is deleted, Vector will maintain its open file
+ handle and continue reading until it reaches `EOF`. When a file is
+ no longer findable in the `include` option and the reader has
+ reached `EOF`, that file's reader is discarded.
+ """
+ }
+ file_read_order: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "file_read_order"
+ title: "File Read Order"
+ body: """
+ By default, Vector attempts to allocate its read bandwidth fairly
+ across all of the files it's currently watching. This prevents a
+ single very busy file from starving other independent files from
+ being read. In certain situations, however, this can lead to
+ interleaved reads from files that should be read one after the
+ other.
+
+ For example, consider a service that logs to a timestamped file,
+ creating a new one at an interval and leaving the old one as-is.
+ Under normal operation, Vector would follow writes as they happen to
+ each file and there would be no interleaving. In an overload
+ situation, however, Vector may pick up and begin tailing newer files
+ before catching up to the latest writes from older files. This would
+ cause writes from a single logical log stream to be interleaved in
+ time and potentially slow down ingestion as a whole, since the fixed
+ total read bandwidth is allocated across an increasing number of
+ files.
+
+ To address this type of situation, Vector provides the
+ `oldest_first` option. When set, Vector will not read from any file
+ younger than the oldest file that it hasn't yet caught up to. In
+ other words, Vector will continue reading from older files as long
+ as there is more data to read. Only once it hits the end will it
+ then move on to read from younger files.
+
+ Whether or not to use the `oldest_first` flag depends on the
+ organization of the logs you're configuring Vector to tail. If your
+ `include` option contains multiple independent logical log streams
+ (e.g. Nginx's access.log and error.log, or logs from multiple
+ services), you are likely better off with the default behavior. If
+ you're dealing with a single logical log stream or if you value
+ per-stream ordering over fairness across streams, consider setting
+ the `oldest_first` option to true.
+ """
+ }
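The file-read-order discussion above boils down to a single switch. A minimal sketch, assuming one logical stream spread across rotated, timestamped files (the source name and paths are illustrative):

```toml
[sources.service_log]
type = "file"
include = ["/var/log/service/*.log"]
# Drain older files fully before reading from younger ones
oldest_first = true
```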
+ file_rotation: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "file_rotation"
+ title: "File Rotation"
+ body: """
+ Vector supports tailing across a number of file rotation strategies.
+ The default behavior of `logrotate` is simply to move the old log
+ file and create a new one. This requires no special configuration of
+ Vector, as it will maintain its open file handle to the rotated log
+ until it has finished reading and it will find the newly created
+ file normally.
+
+ A popular alternative strategy is `copytruncate`, in which
+ `logrotate` will copy the old log file to a new location before
+ truncating the original. Vector will also handle this well out of
+ the box, but there are a couple of configuration options that will help
+ reduce the very small chance of missed data in some edge cases. We
+ recommend a combination of `delaycompress` (if applicable) on the
+ `logrotate` side and including the first rotated file in Vector's
+ `include` option. This allows Vector to find the file after rotation,
+ read it uncompressed to identify it, and then ensure it has all of
+ the data, including any written in a gap between Vector's last read
+ and the actual rotation event.
+ """
+ }
+ fingerprint: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "fingerprint"
+ title: "Fingerprint"
+ body: """
+ By default, Vector identifies files by creating a
+ [cyclic redundancy check](urls.crc) (CRC) on the first 256 bytes of
+ the file. This serves as a fingerprint to uniquely identify the file.
+ The number of bytes read can be controlled via the `fingerprint_bytes`
+ and `ignored_header_bytes` options.
+
+ This strategy avoids the common pitfalls of using device and inode
+ names since inode names can be reused across files. This enables
+ Vector to properly tail files across various rotation strategies.
+ """
+ }
+ globbing: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "globbing"
+ title: "Globbing"
+ body: """
+ [Globbing](https://en.wikipedia.org/wiki/Glob_(programming)) is supported in all provided file paths;
+ files are autodiscovered continually at a rate defined by the
+ `glob_minimum_cooldown` option.
+ """
+ }
+ line_delimiters: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "line_delimiters"
+ title: "Line Delimiters"
+ body: """
+ Each line is read until a new line delimiter (by default, `
+ ` i.e.
+ the `0xA` byte) or `EOF` is found. If needed, the default line
+ delimiter can be overridden via the `line_delimiter` option.
+ """
+ }
+ multiline_messages: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "multiline_messages"
+ title: "Multiline Messages"
+ body: """
+ Sometimes a single log event will appear as multiple log lines. To
+ handle this, Vector provides a set of `multiline` options. These
+ options were carefully thought through and will allow you to solve the
+ simplest and most complex cases. Let's look at a few examples:
+ """
+ sub_sections: [{
+ title: "Example 1: Ruby Exceptions"
+ body: """
+ Ruby exceptions, when logged, consist of multiple lines:
+
+ ```text
+ foobar.rb:6:in `/': divided by 0 (ZeroDivisionError)
+ \tfrom foobar.rb:6:in `bar'
+ \tfrom foobar.rb:2:in `foo'
+ \tfrom foobar.rb:9:in `<main>'
+ ```
+
+ To consume these lines as a single event, use the following Vector
+ configuration:
+
+ ```toml
+ [sources.my_file_source]
+ \ttype = "file"
+ \t# ...
+
+ \t[sources.my_file_source.multiline]
+ \t\tstart_pattern = '^[^\\s]'
+ \t\tmode = "continue_through"
+ \t\tcondition_pattern = '^[\\s]+from'
+ \t\ttimeout_ms = 1000
+ ```
+
+ * `start_pattern`, set to `^[^\\s]`, tells Vector that new
+ \tmulti-line events should _not_ start with white-space.
+ * `mode`, set to `continue_through`, tells Vector to continue
+ \taggregating lines until the `condition_pattern` is no longer
+ \tvalid (excluding the invalid line).
+ * `condition_pattern`, set to `^[\\s]+from`, tells Vector to
+ \tcontinue aggregating lines if they start with white-space
+ \tfollowed by `from`.
+ """
+ }, {
+ title: "Example 2: Line Continuations"
+ body: """
+ Some programming languages use the backslash (`\\`) character to
+ signal that a line will continue on the next line:
+
+ ```text
+ First line\\
+ second line\\
+ third line
+ ```
+
+ To consume these lines as a single event, use the following Vector
+ configuration:
+
+ ```toml
+ [sources.my_file_source]
+ \ttype = "file"
+ \t# ...
+
+ \t[sources.my_file_source.multiline]
+ \t\tstart_pattern = '\\\\$'
+ \t\tmode = "continue_past"
+ \t\tcondition_pattern = '\\\\$'
+ \t\ttimeout_ms = 1000
+ ```
+
+ * `start_pattern`, set to `\\\\$`, tells Vector that new multi-line
+ \tevents start with lines that end in `\\`.
+ * `mode`, set to `continue_past`, tells Vector to continue
+ \taggregating lines, plus one additional line, until
+ \t`condition_pattern` is false.
+ * `condition_pattern`, set to `\\\\$`, tells Vector to continue
+ \taggregating lines if they _end_ with a `\\` character.
+ """
+ }, {
+ title: "Example 3: Timestamps"
+ body: """
+ Activity logs from services such as Elasticsearch typically begin
+ with a timestamp, followed by information on the specific
+ activity, as in this example:
+
+ ```text
+ [2015-08-24 11:49:14,389][ INFO ][env ] [Letha] using [1] data paths, mounts [[/
+ (/dev/disk1)]], net usable_space [34.5gb], net total_space [118.9gb], types [hfs]
+ ```
+
+ To consume these lines as a single event, use the following Vector
+ configuration:
+
+ ```toml
+ [sources.my_file_source]
+ \ttype = "file"
+ \t# ...
+
+ \t[sources.my_file_source.multiline]
+ \t\tstart_pattern = '^\\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
+ \t\tmode = "halt_before"
+ \t\tcondition_pattern = '^\\[[0-9]{4}-[0-9]{2}-[0-9]{2}'
+ \t\ttimeout_ms = 1000
+ ```
+
+ * `start_pattern`, set to `^\\[[0-9]{4}-[0-9]{2}-[0-9]{2}`, tells
+ \tVector that new multi-line events start with a timestamp
+ \tsequence.
+ * `mode`, set to `halt_before`, tells Vector to continue
+ \taggregating lines as long as the `condition_pattern` does not
+ \tmatch.
+ * `condition_pattern`, set to `^\\[[0-9]{4}-[0-9]{2}-[0-9]{2}`,
+ \ttells Vector to continue aggregating up until a line starts with
+ \ta timestamp sequence.
+ """
+ }]
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ checkpointing: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "checkpointing"
+ title: "Checkpointing"
+ body: """
+ Vector checkpoints the current read position after each
+ successful read. This ensures that Vector resumes where it left
+ off if restarted, preventing data from being read twice.
The + checkpoint positions are stored in the data directory which is + specified via the global `data_dir` option, but can be overridden + via the `data_dir` option in the file source directly. + """ + } + read_position: { + #Subsection: { + title: string + body: string + } + name: "read_position" + title: "Read Position" + body: """ + By default, Vector will read from the beginning of newly discovered + files. You can change this behavior by setting the `read_from` option to + `"end"`. + + Previously discovered files will be [checkpointed](#checkpointing), and + the read position will resume from the last checkpoint. To disable this + behavior, you can set the `ignore_checkpoints` option to `true`. This + will cause Vector to disregard existing checkpoints when determining the + starting read position of a file. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `file` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + checkpoint_write_errors_total: { + description: "The total number of errors writing checkpoints." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checkpoint_write_errors_total" + } + checkpoints_total: { + description: "The total number of files checkpointed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checkpoints_total" + } + checksum_errors_total: { + description: "The total number of errors identifying files via checksum." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checksum_errors_total" + } + file_delete_errors_total: { + description: "The total number of failures to delete a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "file_delete_errors_total" + } + file_watch_errors_total: { + description: "The total number of errors encountered when watching files." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "file_watch_errors_total" + } + files_added_total: { + description: "The total number of files Vector has found to watch." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_added_total" + } + files_deleted_total: { + description: "The total number of files deleted." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_deleted_total" + } + files_resumed_total: { + description: "The total number of times Vector has resumed watching a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_resumed_total" + } + files_unwatched_total: { + description: "The total number of times Vector has stopped watching a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_unwatched_total" + } + fingerprint_read_errors_total: { + description: "The total number of times Vector failed to read a file for fingerprinting." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "fingerprint_read_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ glob_errors_total: {
+ description: "The total number of errors encountered when globbing paths."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ path: {
+ name: "path"
+ description: "The path that produced the error."
+ required: true
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ }
+ name: "glob_errors_total"
+ }
+ }
+ }
+ }
+ prometheus_scrape: {
+ kind: "source"
+ title: "Prometheus Scrape"
+ alias: "prometheus"
+ classes: {
+ commonly_used: false
+ delivery: "at_least_once"
+ deployment_roles: ["daemon", "sidecar"]
+ development: "beta"
+ egress_method: "batch"
+ stateful: false
+ }
+ features: {
+ collect: {
+ checkpoint: {
+ enabled: false
+ }
+ from: {
+ service: {
+ name: "Prometheus client"
+ thing: "a Prometheus client"
+ url: "https://prometheus.io/docs/instrumenting/clientlibs/"
+ versions: null
+ }
+ interface: {
+ socket: {
+ api: {
+ title: "Prometheus"
+ url: "https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format"
+ }
+ direction: "outgoing"
+ protocols: ["http"]
+ ssl: "optional"
+ }
+ }
+ }
+ tls: {
+ enabled: true
+ can_enable: false
+ can_verify_certificate: true
+ can_verify_hostname: true
+ enabled_default: false
+ }
+ }
+ multiline: {
+ enabled: false
+ }
+ descriptions: {
+ collect_context: "Enriches data with useful Prometheus client context."
+ checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts."
+ tls_collect: "Securely collects data via Transport Layer Security (TLS)."
+ }
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: []
+ notices: []
+ }
+ installation: {
+ platform_name: null
+ }
+ configuration: {
+ endpoints: {
+ description: "Endpoints to scrape metrics from."
+ name: "endpoints"
+ required: true
+ warnings: ["You must explicitly add the path to your endpoints. Vector will _not_ automatically add `/metrics`."]
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["http://localhost:9090/metrics"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ scrape_interval_secs: {
+ common: true
+ description: "The interval between scrapes, in seconds."
+ name: "scrape_interval_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 15
+ unit: "seconds"
+ }
+ }
+ }
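For reference, a minimal `prometheus_scrape` configuration wiring the two options above together; note the explicit `/metrics` path, which, per the warning above, Vector does not append for you (the source name is illustrative):

```toml
[sources.app_metrics]
type = "prometheus_scrape"
endpoints = ["http://localhost:9090/metrics"]
scrape_interval_secs = 15
```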
+ name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + auth: { + common: false + category: "Auth" + description: "Configures the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${PROMETHEUS_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." + name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + bearer: "The bearer token authentication strategy." + } + examples: ["basic", "bearer"] + syntax: "literal" + } + } + } + token: { + description: "The token to use for bearer authentication" + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${API_TOKEN}", "xyz123"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." + name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${PROMETHEUS_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. 
This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + prometheus_scrape: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "prometheus_scrape" + #ExampleConfig: { + title: string + configuration: { + endpoints: null + scrape_interval_secs: null + auth: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + counter: { + description: """ + A single value that can only be incremented + or reset to zero value, it cannot be + decremented. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "counter" + type: "counter" + default_namespace: "vector" + } + gauge: { + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "gauge" + type: "gauge" + default_namespace: "vector" + } + histogram: { + description: """ + Also called a "timer". A histogram samples + observations (usually things like request + durations or response sizes) and counts them + in configurable buckets. It also provides a + sum of all observed values. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "histogram" + type: "gauge" + default_namespace: "vector" + } + summary: { + description: """ + Similar to a histogram, a summary samples + observations (usually things like request + durations and response sizes). While it also + provides a total count of observations and a + sum of all observed values, it calculates + configurable quantiles over a sliding time + window. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "summary" + type: "gauge" + default_namespace: "vector" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `prometheus_scrape` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ }
+ generator: {
+ kind: "source"
+ title: "Generator"
+ description: "Generates fake events, useful for testing, benchmarking, and demoing."
+ classes: {
+ commonly_used: false
+ delivery: "at_least_once"
+ deployment_roles: ["daemon", "sidecar"]
+ development: "stable"
+ egress_method: "stream"
+ stateful: false
+ }
+ features: {
+ multiline: {
+ enabled: false
+ }
+ generate: {}
+ descriptions: {}
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: []
+ notices: []
+ }
+ installation: {
+ platform_name: null
+ }
+ configuration: {
+ format: {
+ description: "The format of the randomly generated output."
+ name: "format"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ enum: {
+ shuffle: "Lines are chosen at random from the list specified using `lines`."
+ apache_common: "Randomly generated logs in [Apache common](https://httpd.apache.org/docs/1.3/logs.html#common) format."
+ apache_error: "Randomly generated logs in [Apache error](https://httpd.apache.org/docs/1.3/logs.html#errorlog) format."
+ syslog: "Randomly generated logs in Syslog format ([RFC 5424](https://tools.ietf.org/html/rfc5424))."
+ bsd_syslog: "Randomly generated logs in Syslog format ([RFC 3164](https://tools.ietf.org/html/rfc3164))."
+ json: "Randomly generated HTTP server logs in [JSON](https://en.wikipedia.org/wiki/JSON) format."
+ }
+ examples: ["shuffle", "apache_common", "apache_error", "syslog", "bsd_syslog", "json"]
+ syntax: "literal"
+ }
+ }
+ }
+ interval: {
+ common: false
+ description: "The amount of time, in seconds, to pause between each batch of output lines. If not set, there is no delay."
+ name: "interval"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: null
+ examples: [1.0, 0.1, 0.01]
+ }
+ }
+ }
+ count: {
+ common: false
+ description: "The total number of lines to output. By default, the source continuously prints logs (infinitely)."
+ name: "count"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: null
+ unit: null
+ }
+ }
+ }
+ lines: {
+ common: false
+ description: "The list of lines to output."
+ name: "lines"
+ relevant_when: "`format` = `shuffle`"
+ required: false
+ warnings: []
+ type: {
+ array: {
+ default: null
+ items: {
+ type: {
+ string: {
+ examples: ["Line 1", "Line 2"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ sequence: {
+ common: false
+ relevant_when: "`format` = `shuffle`"
+ description: "If `true`, each output line starts with an increasing sequence number, beginning with 0."
+ name: "sequence"
+ required: false
+ warnings: []
+ type: {
+ bool: {
+ default: false
+ }
+ }
+ }
+ type: {
+ description: "The component type. This is a required field for all components and tells Vector which component to use."
+ name: "type"
+ required: true
+ warnings: []
+ sort: -2
+ type: {
+ string: {
+ enum: {
+ generator: "The type of this component."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
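As a sketch of how these options interact (the source name is illustrative), a `shuffle` generator that emits the two configured lines once per second, each prefixed with a sequence number:

```toml
[sources.demo_logs]
type = "generator"
format = "shuffle"
lines = ["Line 1", "Line 2"]
interval = 1.0   # seconds to pause between batches
sequence = true  # prefix each line with an increasing counter
```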
+ name: "sequence" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + generator: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "generator" + #ExampleConfig: { + title: string + configuration: { + format: null + interval: null + count: null + lines: null + sequence: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: {} + } + telemetry: { + metrics: { + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `generator` source will augment events with helpful + context keys as shown in the "Output" section. 
+ """ + } + } + } + journald: { + kind: "source" + title: "Journald" + classes: { + commonly_used: true + delivery: "at_least_once" + deployment_roles: ["daemon"] + development: "stable" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: true + } + from: { + service: { + name: "JournalD" + thing: "JournalD" + url: "https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html" + versions: null + description: "[Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) is a utility for accessing log data across a variety of system services. It was introduced with [Systemd](https://systemd.io/) to help system administrators collect, access, and route log data." + } + interface: { + binary: { + name: "journalctl" + permissions: { + unix: { + group: "systemd-journal" + } + } + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful JournalD context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": false + "x86_64-pc-windows-msv": false + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + batch_size: { + common: false + description: "The systemd journal is read in batches, and a checkpoint is set at the end of each batch. This option limits the size of the batch." + name: "batch_size" + required: false + warnings: [] + type: { + uint: { + default: 16 + unit: null + } + } + } + current_boot_only: { + common: true + description: "Include only entries from the current boot." + name: "current_boot_only" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + exclude_units: { + common: true + description: "The list of unit names to exclude from monitoring. Unit names lacking a `\".\"` will have `\".service\"` appended to make them a valid service unit name." + name: "exclude_units" + required: false + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["badservice", "sysinit.target"] + syntax: "literal" + } + } + } + } + } + } + include_units: { + common: true + description: "The list of unit names to monitor. If empty or not present, all units are accepted. Unit names lacking a `\".\"` will have `\".service\"` appended to make them a valid service unit name." + name: "include_units" + required: false + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["ntpd", "sysinit.target"] + syntax: "literal" + } + } + } + } + } + } + data_dir: { + common: false + description: "The directory used to persist file checkpoint positions. By default, the global `data_dir` option is used. Please make sure the Vector project has write permissions to this dir." + name: "data_dir" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/var/lib/vector"] + syntax: "file_system_path" + } + } + } + journalctl_path: { + common: false + description: "The full path of the `journalctl` executable. If not set, Vector will search the path for `journalctl`." 
+ name: "journalctl_path" + required: false + warnings: [] + type: { + string: { + default: "journalctl" + examples: ["/usr/local/bin/journalctl"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + journald: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "journald" + #ExampleConfig: { + title: string + configuration: { + batch_size: null + current_boot_only: null + exclude_units: null + include_units: null + journalctl_path: null + type: null + data_dir: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + event: { + description: "A Journald event" + name: "event" + fields: { + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + message: { + description: "The raw line from the file." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + "*": { + common: false + description: "Any Journald field" + name: "*" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/usr/sbin/ntpd", "c36e9ea52800a19d214cb71b53263a28"] + syntax: "literal" + } + } + } + } + } + } + } + examples: [{ + title: "Sample Output" + configuration: { + batch_size: null + current_boot_only: null + exclude_units: null + include_units: null + journalctl_path: null + type: null + data_dir: null + } + input: """ + ```text + 2019-07-26 20:30:27 reply from 192.168.1.2: offset -0.001791 delay 0.000176, next query 1500s + ``` + """ + output: [{ + log: { + timestamp: "2020-10-10T17:07:36.452332Z" + message: "reply from 192.168.1.2: offset -0.001791 delay 0.000176, next query 1500s" + host: "my-host.local" + "__REALTIME_TIMESTAMP": "1564173027000443" + "__MONOTONIC_TIMESTAMP": "98694000446" + "_BOOT_ID": "124c781146e841ae8d9b4590df8b9231" + SYSLOG_FACILITY: "3" + "_UID": "0" + "_GID": "0" + "_CAP_EFFECTIVE": "3fffffffff" + "_MACHINE_ID": "c36e9ea52800a19d214cb71b53263a28" + PRIORITY: "6" + "_TRANSPORT": "stdout" + "_STREAM_ID": "92c79f4b45c4457490ebdefece29995e" + SYSLOG_IDENTIFIER: "ntpd" + "_PID": "2156" + "_COMM": "ntpd" + "_EXE": "/usr/sbin/ntpd" + "_CMDLINE": "ntpd: [priv]" + "_SYSTEMD_CGROUP": "/system.slice/ntpd.service" + "_SYSTEMD_UNIT": "ntpd.service" + "_SYSTEMD_SLICE": "system.slice" + "_SYSTEMD_INVOCATION_ID": "496ad5cd046d48e29f37f559a6d176f8" + } + }] + }] + how_it_works: { + communication_strategy: { + #Subsection: { + title: string + body: string + } + name: "communication_strategy" + title: "Communication Strategy" + body: """ + To ensure the `journald` source works across all platforms, Vector interacts + with the Systemd journal via the `journalctl` command. This is accomplished by + spawning a [subprocess][urls.rust_subprocess] that Vector interacts + with. 
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ checkpointing: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "checkpointing"
+ title: "Checkpointing"
+ body: """
+ Vector checkpoints the current read position after each
+ successful read. This ensures that Vector resumes where it left
+ off if restarted, preventing data from being read twice. The
+ checkpoint positions are stored in the data directory which is
+ specified via the global `data_dir` option, but can be overridden
+ via the `data_dir` option in the `journald` source directly.
+ """
+ }
+ non_ascii: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "non_ascii"
+ title: "Non-ASCII Messages"
+ body: """
+ When `journald` has stored a message that is not strict ASCII,
+ `journalctl` will output it in an alternate format to prevent data
+ loss. Vector handles this alternate format by translating such messages
+ into UTF-8 in "lossy" mode, where characters that are not valid UTF-8
+ are replaced with the Unicode replacement character, `�`.
+ """
+ }
+ context: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "context"
+ title: "Context"
+ body: """
+ By default, the `journald` source will augment events with helpful
+ context keys as shown in the "Output" section.
+ """
+ }
+ }
+ telemetry: {
+ metrics: {
+ invalid_record_total: {
+ description: "The total number of invalid records that have been discarded."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "invalid_record_total"
+ }
+ invalid_record_bytes_total: {
+ description: "The total number of bytes from invalid records that have been discarded."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "invalid_record_bytes_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + } + } + } + internal_metrics: { + kind: "source" + title: "Internal Metrics" + description: """ + Exposes Vector's own internal metrics, allowing you to collect, process, + and route Vector's internal metrics just like other metrics. + """ + classes: { + commonly_used: true + delivery: "at_least_once" + deployment_roles: ["aggregator", "daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Vector" + thing: "a Vector instance" + url: "https://vector.dev/docs/" + versions: ">= 0.11.0" + connect_to: { + splunk: { + logs: { + setup: [{ + title: "Create a Splunk HEC endpoint" + description: "Follow the Splunk HEC setup docs to create a Splunk HEC endpoint." + detour: { + url: "https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector" + } + }, { + title: "Configure Vector" + description: """ + Splunk will provide you with a host and token. Copy those + values to the `host` and `token` options. + """ + vector: { + configure: { + sinks: { + splunk_hec: { + type: "splunk_hec" + host: "" + token: "" + } + } + } + } + }] + } + } + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Vector context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + notices: [] + requirements: [] + warnings: [] + } + installation: { + platform_name: null + } + configuration: { + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + internal_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "internal_metrics" + #ExampleConfig: { + title: string + configuration: { + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + api_started_total: { + description: "The number of times the Vector GraphQL API has been started." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "api_started_total" + } + config_load_errors_total: { + description: "The total number of errors loading the Vector configuration." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "config_load_errors_total" + } + connection_errors_total: { + description: "The total number of connection errors for this Vector instance." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_errors_total" + } + connection_established_total: { + description: "The total number of times a connection has been established." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_established_total" + } + connection_failed_total: { + description: "The total number of times a connection has failed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_failed_total" + } + connection_send_errors_total: { + description: "The total number of errors sending data via the connection." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_send_errors_total" + } + connection_shutdown_total: { + description: "The total number of times the connection has been shut down." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_shutdown_total" + } + quit_total: { + description: "The total number of times the Vector instance has quit." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "quit_total" + } + recover_errors_total: { + description: "The total number of errors caused by Vector failing to recover from a failed reload." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "recover_errors_total" + } + reload_errors_total: { + description: "The total number of errors encountered when reloading Vector." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "reload_errors_total" + } + reloaded_total: { + description: "The total number of times the Vector instance has been reloaded." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "reloaded_total" + } + started_total: { + description: "The total number of times the Vector instance has been started." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "started_total" + } + stopped_total: { + description: "The total number of times the Vector instance has been stopped." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "stopped_total" + } + adaptive_concurrency_averaged_rtt: { + description: "The average round-trip time (RTT) from the HTTP sink across the current window." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "adaptive_concurrency_averaged_rtt" + } + adaptive_concurrency_in_flight: { + description: "The number of outbound requests from the HTTP sink currently awaiting a response." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "adaptive_concurrency_in_flight" + } + adaptive_concurrency_limit: { + description: "The concurrency limit that the adaptive concurrency feature has decided on for this current window." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "adaptive_concurrency_limit" + } + adaptive_concurrency_observed_rtt: { + description: "The observed round-trip time (RTT) for requests from this HTTP sink." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "adaptive_concurrency_observed_rtt" + } + checkpoint_write_errors_total: { + description: "The total number of errors writing checkpoints." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checkpoint_write_errors_total" + } + checkpoints_total: { + description: "The total number of files checkpointed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checkpoints_total" + } + checksum_errors_total: { + description: "The total number of errors identifying files via checksum." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "checksum_errors_total" + } + collect_completed_total: { + description: "The total number of metrics collections completed for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_completed_total" + } + collect_duration_nanoseconds: { + description: "The duration spent collecting metrics for this component." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_duration_nanoseconds" + } + communication_errors_total: { + description: "The total number of errors stemming from communication with the Docker daemon." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "communication_errors_total" + } + connection_read_errors_total: { + description: "The total number of errors reading datagrams." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + mode: { + name: "mode" + description: "The connection mode used by the component." + required: true + enum: { + udp: "User Datagram Protocol" + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "connection_read_errors_total" + } + consumer_offset_updates_failed_total: { + description: "The total number of failures to update a Kafka consumer offset." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "consumer_offset_updates_failed_total" + } + container_processed_events_total: { + description: "The total number of container events processed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID."
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "container_processed_events_total" + } + container_metadata_fetch_errors_total: { + description: "The total number of errors encountered when fetching container metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "container_metadata_fetch_errors_total" + } + containers_unwatched_total: { + description: "The total number of times Vector stopped watching for container logs." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "containers_unwatched_total" + } + containers_watched_total: { + description: "The total number of times Vector started watching for container logs." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "containers_watched_total" + } + k8s_format_picker_edge_cases_total: { + description: "The total number of edge cases encountered while picking format of the Kubernetes log message." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_format_picker_edge_cases_total" + } + k8s_docker_format_parse_failures_total: { + description: "The total number of failures to parse a message as a JSON object." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_docker_format_parse_failures_total" + } + k8s_event_annotation_failures_total: { + description: "The total number of failures to annotate Vector events with Kubernetes Pod metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_event_annotation_failures_total" + } + encode_errors_total: { + description: "The total number of errors encountered when encoding an event." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "encode_errors_total" + } + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "events_discarded_total" + } + events_failed_total: { + description: "The total number of failures to read a Kafka message." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_failed_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + file_delete_errors_total: { + description: "The total number of failures to delete a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "file_delete_errors_total" + } + file_watch_errors_total: { + description: "The total number of errors encountered when watching files." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "file_watch_errors_total" + } + files_added_total: { + description: "The total number of files Vector has found to watch." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_added_total" + } + files_deleted_total: { + description: "The total number of files deleted." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_deleted_total" + } + files_resumed_total: { + description: "The total number of times Vector has resumed watching a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_resumed_total" + } + files_unwatched_total: { + description: "The total number of times Vector has stopped watching a file." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "files_unwatched_total" + } + fingerprint_read_errors_total: { + description: "The total number of times Vector failed to read a file for fingerprinting." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "fingerprint_read_errors_total" + } + glob_errors_total: { + description: "The total number of errors encountered when globbing paths." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + path: { + name: "path" + description: "The path that produced the error." + required: true + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "glob_errors_total" + } + http_bad_requests_total: { + description: "The total number of HTTP `400 Bad Request` errors encountered." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_bad_requests_total" + } + http_error_response_total: { + description: "The total number of HTTP error responses for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "http_error_response_total" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + http_requests_total: { + description: "The total number of HTTP requests issued by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "http_requests_total" + } + invalid_record_total: { + description: "The total number of invalid records that have been discarded." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "invalid_record_total" + } + invalid_record_bytes_total: { + description: "The total number of bytes from invalid records that have been discarded." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "invalid_record_bytes_total" + } + logging_driver_errors_total: { + description: """ + The total number of logging driver errors encountered, caused by not using + either the `jsonfile` or `journald` driver. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "logging_driver_errors_total" + } + memory_used_bytes: { + description: "The total memory currently being used by Vector (in bytes)." + type: "gauge" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "memory_used_bytes" + } + metadata_refresh_failed_total: { + description: "The total number of failed attempts to refresh AWS EC2 metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "metadata_refresh_failed_total" + } + metadata_refresh_successful_total: { + description: "The total number of successful AWS EC2 metadata refreshes." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID."
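+ // A hedged note on `logging_driver_errors_total`: the errors it counts arise when Docker runs with a logging driver other than `jsonfile` or `journald`, so pinning the daemon's default driver avoids them. Sketch of Docker's standard daemon configuration file (the path is an assumption for a default Linux install): + // + //   # /etc/docker/daemon.json + //   { "log-driver": "json-file" }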
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "metadata_refresh_successful_total" + } + missing_keys_total: { + description: "The total number of failed template renders due to keys missing from the event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "missing_keys_total" + } + open_connections: { + description: "The current number of open connections to Vector." + type: "gauge" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "open_connections" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID."
+ required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error." + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operation failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + protobuf_decode_errors_total: { + description: "The total number of [Protocol Buffers](https://developers.google.com/protocol-buffers) errors thrown during communication between Vector instances." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "protobuf_decode_errors_total" + } + request_errors_total: { + description: "The total number of request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_errors_total" + } + request_duration_nanoseconds: { + description: "The total request duration in nanoseconds." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_duration_nanoseconds" + } + request_read_errors_total: { + description: "The total number of request read errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "request_read_errors_total" + } + requests_completed_total: { + description: "The total number of requests completed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_completed_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + send_errors_total: { + description: "The total number of errors sending messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "send_errors_total" + } + sqs_message_delete_failed_total: { + description: "The total number of failures to delete SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_delete_failed_total" + } + sqs_message_delete_succeeded_total: { + description: "The total number of successful deletions of SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_delete_succeeded_total" + } + sqs_message_processing_failed_total: { + description: "The total number of failures to process SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_processing_failed_total" + } + sqs_message_processing_succeeded_total: { + description: "The total number of SQS messages successfully processed." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_processing_succeeded_total" + } + sqs_message_receive_failed_total: { + description: "The total number of failures to receive SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_receive_failed_total" + } + sqs_message_receive_succeeded_total: { + description: "The total number of times SQS messages were successfully received." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_receive_succeeded_total" + } + sqs_message_received_messages_total: { + description: "The total number of received SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_received_messages_total" + } + sqs_s3_event_record_ignored_total: { + description: "The total number of times an S3 record in an SQS message was ignored (for an event that was not `ObjectCreated`)." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + ignore_type: { + name: "ignore_type" + description: "The reason for ignoring the S3 record" + required: true + enum: { + invalid_event_kind: "The kind of invalid event." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_s3_event_record_ignored_total" + } + stale_events_flushed_total: { + description: "The number of stale events that Vector has flushed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "stale_events_flushed_total" + } + stdin_reads_failed_total: { + description: "The total number of errors reading from stdin." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "stdin_reads_failed_total" + } + tag_value_limit_exceeded_total: { + description: """ + The total number of events discarded because the tag has been rejected after + hitting the configured `value_limit`. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "tag_value_limit_exceeded_total" + } + timestamp_parse_errors_total: { + description: "The total number of errors encountered parsing [RFC 3339](https://tools.ietf.org/html/rfc3339) timestamps." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "timestamp_parse_errors_total" + } + uptime_seconds: { + description: "The total number of seconds the Vector instance has been up." + type: "gauge" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "uptime_seconds" + } + utf8_convert_errors_total: { + description: "The total number of errors converting bytes to a UTF-8 string in UDP mode." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + mode: { + name: "mode" + description: "The connection mode used by the component." + required: true + enum: { + udp: "User Datagram Protocol" + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "utf8_convert_errors_total" + } + value_limit_reached_total: { + description: """ + The total number of times new values for a key have been rejected because the + value limit has been reached. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "value_limit_reached_total" + } + windows_service_does_not_exist_total: { + description: """ + The total number of errors raised due to the Windows service not + existing. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_does_not_exist_total" + } + windows_service_install_total: { + description: "The total number of times the Windows service has been installed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_install_total" + } + windows_service_restart_total: { + description: "The total number of times the Windows service has been restarted." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_restart_total" + } + windows_service_start_total: { + description: "The total number of times the Windows service has been started." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_start_total" + } + windows_service_stop_total: { + description: "The total number of times the Windows service has been stopped." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_stop_total" + } + windows_service_uninstall_total: { + description: "The total number of times the Windows service has been uninstalled." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "windows_service_uninstall_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `internal_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + aws_kinesis_firehose: { + kind: "source" + title: "AWS Kinesis Firehose" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["aggregator"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "AWS Kinesis Firehose" + thing: "a AWS Kinesis Firehose stream" + url: "https://aws.amazon.com/kinesis/data-firehose/" + versions: null + description: """ + [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) is a fully + managed service for delivering real-time streaming data to destinations + such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, + Amazon Elasticsearch Service (Amazon ES), and Splunk. + """ + connect_to: { + vector: { + logs: { + setup: [{ + title: "Configure Vector to accept AWS Kinesis Firehose data" + vector: { + configure: { + sources: { + aws_kinesis_firehose: { + type: "aws_kinesis_firehose" + address: "0.0.0.0:443" + access_key: "A94A8FE5CCB19BA61C4C08" + region: "us-east-1" + } + } + } + } + }, { + title: "Configure TLS termination" + description: """ + AWS Kinesis Firehose will only forward to HTTPS (and not HTTP) + endpoints running on port 443. You will need to either put a load + balancer in front of the Vector instance to handle TLS termination + or configure the `tls` options of the Vector `aws_kinesis_firehose` + source to serve a valid certificate. + """ + detour: { + url: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html" + } + }, { + title: "Create an AWS Kinesis Firehose HTTP Stream" + description: """ + Using your previously configured TLS enabled HTTP endpoint, + let's create a Kinesis Firehose HTTP stream that delivers + data to it. Be sure to use your HTTP endpoint. + """ + detour: { + url: "https://aws.amazon.com/blogs/big-data/stream-data-to-an-http-endpoint-with-amazon-kinesis-data-firehose/" + } + }] + } + } + } + } + interface: { + socket: { + api: { + title: "AWS Kinesis Firehose HTTP Destination" + url: "https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html" + } + direction: "incoming" + port: 443 + protocols: ["http"] + ssl: "required" + } + } + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful AWS Kinesis Firehose context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [""" + AWS Kinesis Firehose can only deliver data over HTTP. You will need + to solve TLS termination by fronting Vector with a load balaner or + configuring the `tls.*` options. 
+ """] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to listen for connections on" + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:443", "localhost:443"] + syntax: "literal" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + access_key: { + common: true + description: """ + AWS Kinesis Firehose can be configured to pass along an access + key to authenticate requests. If configured, `access_key` should + be set to the same value. If not specified, vector will treat + all requests as authenticated. + """ + name: "access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["A94A8FE5CCB19BA61C4C08"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_kinesis_firehose: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "aws_kinesis_firehose" + #ExampleConfig: { + title: string + configuration: { + address: null + access_key: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "One event will be published per incoming AWS Kinesis Firehose record." + name: "line" + fields: { + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + message: { + description: "The raw record from the incoming payload." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["Started GET / for 127.0.0.1 at 2012-03-10 14:28:14 +0100"] + syntax: "literal" + } + } + } + request_id: { + description: "The AWS Kinesis Firehose request ID, value of the `X-Amz-Firehose-Request-Id` header." + name: "request_id" + required: true + warnings: [] + type: { + string: { + examples: ["ed1d787c-b9e2-4631-92dc-8e7c9d26d804"] + syntax: "literal" + } + } + } + source_arn: { + description: "The AWS Kinises Firehose delivery stream that issued the request, value of the `X-Amz-Firehose-Source-Arn` header." + name: "source_arn" + required: true + warnings: [] + type: { + string: { + examples: ["arn:aws:firehose:us-east-1:111111111111:deliverystream/test"] + syntax: "literal" + } + } + } + } + } + } + } + examples: [{ + title: "AWS CloudWatch Subscription message" + configuration: { + address: "0.0.0.0:443" + access_key: null + type: null + tls: null + } + input: """ + ```json + \t{ + \t\t"requestId": "ed1d787c-b9e2-4631-92dc-8e7c9d26d804", + \t\t"timestamp": 1600110760138, + \t\t"records": [ + \t\t\t{ + \t\t\t\t"data": "H4sIABk1bV8AA52TzW7bMBCE734KQ2db/JdI3QzETS8FAtg91UGgyOuEqCQq5Mqua+TdS8lu0hYNUpQHAdoZDcn9tKfJdJo0EEL5AOtjB0kxTa4W68Xdp+VqtbheJrPB4A4t+EFiv6yzVLuHa+/6blARAr5UV+ihbH4vh/4+VN52aF37wdYIPkTDlyhF8SrabFsOWhIrtz+Dlnto8dV3Gp9RstshXKhMi0xpqk3GpNJccpFRKYw0WvCM5kIbzrVWipm4VK55rrSk44HGHLTx/lg2wxVYRiljVGWGCvPiuPRn2O60Se6P8UKbpOBZrulsk2xLhCEjljYJk2QFHeGU04KxQqpCsumcSko3SfQ+uoBnn8pTJmjKWZYyI0axAXx021G++bweS5136CpXj8WP6/UNYek5ycMOPPhReETsQkHI4XBIO2/bynZlXXkXwryrS9w536TWkab0XwED6e/tU2/R9eGS9NTD5VgEvnWwtQikcu0e/AO0FYyu4HpfwR3Gf2R0Btza9qxgiUNUISiLr30AP7fbyMzu7OWA803ynIzdfJ69B1EZpoVhsWMRZ8a5UVJoRoUyUlDNspxzZWiEnOXiXYiSvQOR5TnN/xsiNalmKZcy5Yr/yfB6+RZD/gbDC0IbOx8wQrMhxGGYx4lBW5X1wJBLkpO981jWf6EXogvIrm+rYYrKOn4Hgbg4b439/s8cFeVvcNwBtHBkOdWvQIdRnTxPfgCXvyEgSQQAAA==" + \t\t\t} + \t\t] + \t} + ``` + """ + output: [{ + log: { + request_id: "ed1d787c-b9e2-4631-92dc-8e7c9d26d804" + source_arn: "arn:aws:firehose:us-east-1:111111111111:deliverystream/test" + timestamp: "2020-09-14T19:12:40.138Z" + message: "{\"messageType\":\"DATA_MESSAGE\",\"owner\":\"111111111111\",\"logGroup\":\"test\",\"logStream\":\"test\",\"subscriptionFilters\":[\"Destination\"],\"logEvents\":[{\"id\":\"35683658089614582423604394983260738922885519999578275840\",\"timestamp\":1600110569039,\"message\":\"{\\\"bytes\\\":26780,\\\"datetime\\\":\\\"14/Sep/2020:11:45:41 
-0400\\\",\\\"host\\\":\\\"157.130.216.193\\\",\\\"method\\\":\\\"PUT\\\",\\\"protocol\\\":\\\"HTTP/1.0\\\",\\\"referer\\\":\\\"https://www.principalcross-platform.io/markets/ubiquitous\\\",\\\"request\\\":\\\"/expedite/convergence\\\",\\\"source_type\\\":\\\"stdin\\\",\\\"status\\\":301,\\\"user-identifier\\\":\\\"-\\\"}\"},{\"id\":\"35683658089659183914001456229543810359430816722590236673\",\"timestamp\":1600110569041,\"message\":\"{\\\"bytes\\\":17707,\\\"datetime\\\":\\\"14/Sep/2020:11:45:41 -0400\\\",\\\"host\\\":\\\"109.81.244.252\\\",\\\"method\\\":\\\"GET\\\",\\\"protocol\\\":\\\"HTTP/2.0\\\",\\\"referer\\\":\\\"http://www.investormission-critical.io/24/7/vortals\\\",\\\"request\\\":\\\"/scale/functionalities/optimize\\\",\\\"source_type\\\":\\\"stdin\\\",\\\"status\\\":502,\\\"user-identifier\\\":\\\"feeney1708\\\"}\"}]}" + } + }] + }] + how_it_works: { + structured_events: { + #Subsection: { + title: string + body: string + } + name: "structured_events" + title: "Forwarding CloudWatch Log events" + body: """ + This source is the recommended way to ingest logs from AWS + CloudWatch logs via [AWS CloudWatch Log + subscriptions][aws_cloudwatch_logs_subscriptions]. To + set this up: + + 1. Deploy vector with a publicly exposed HTTP endpoint using + this source. You will likely also want to use the + [`aws_cloudwatch_logs_subscription_parser`][vector_transform_aws_cloudwatch_logs_subscription_parser] + transform to extract the log events. Make sure to set + the `access_key` to secure this endpoint. Your + configuration might look something like: + + ```toml + \t\t[sources.firehose] + \t\t # General + \t\t type = "aws_kinesis_firehose" + \t\t address = "127.0.0.1:9000" + \t\t access_key = "secret" + + \t\t[transforms.cloudwatch] + \t\t type = "aws_cloudwatch_logs_subscription_parser" + \t\t inputs = ["firehose"] + + \t\t[sinks.console] + \t\t type = "console" + \t\t inputs = ["cloudwatch"] + \t\t encoding.codec = "json" + ``` + + 2. Create a Kinesis Firewatch delivery stream in the region + where the CloudWatch Logs groups exist that you want to + ingest. + 3. Set the stream to forward to your Vector instance via its + HTTP Endpoint destination. Make sure to configure the + same `access_key` you set earlier. + 4. Setup a [CloudWatch Logs + subscription][aws_cloudwatch_logs_subscriptions] to + forward the events to your delivery stream + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `aws_kinesis_firehose` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + request_read_errors_total: { + description: "The total number of request read errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "request_read_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + } + } + } + heroku_logs: { + kind: "source" + title: "Heroku Logplex" + description: "Receives log data from Heroku log drains via Heroku's logplex system." + alias: "logplex" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["aggregator"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "Heroku" + thing: "a Heroku app" + url: "https://devcenter.heroku.com/articles/logplex" + versions: null + description: """ + [Heroku](https://www.heroku.com) is a container-based platform for deploying and + managing applications. It's a platform as a service (PaaS) that is fully + managed, allowing developers on Heroku to focus on their applications + instead of their infrastructure. 
+ """ + setup: [{ + title: "Setup a Heroku app" + description: "Setup a Heroku app by following the Heroku setup instructions." + detour: { + url: "https://devcenter.heroku.com/start" + } + }] + connect_to: { + vector: { + logs: { + setup: [{ + title: "Configure Vector to accept Heroku logs" + vector: { + configure: { + sources: { + logplex: { + type: "logplex" + address: "0.0.0.0:80" + } + } + } + } + }, { + title: "Configure TLS termination" + description: """ + It is _highly_ recommended to configure TLS termination for + your previously configured Vector logplex address. + + You should either put a load balancer in front of the Vector + instance to handle TLS termination or configure the `tls` options + of the Vector `logplex` source to serve a valid certificate. + """ + detour: { + url: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html" + } + }, { + title: "Setup a Heroku log drain" + description: """ + Using your exposed Vector HTTP address, create a [Heroku log drain](https://devcenter.heroku.com/articles/log-drains#https-drains) + that points to your Vector instance's address: + + ```bash + heroku drains:add https://:@
-a <app_name> + ``` + """ + }] + } + } + } + } + interface: { + socket: { + api: { + title: "Syslog 6587" + url: "https://tools.ietf.org/html/rfc6587" + } + direction: "incoming" + port: 80 + protocols: ["http"] + ssl: "optional" + } + } + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful Heroku context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to accept connections on. The address _must_ include a port." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:80", "localhost:80"] + syntax: "literal" + } + } + } + auth: { + common: false + category: "Auth" + description: "Options for HTTP Basic Authentication." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + username: { + description: "The basic authentication user name." + name: "username" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required."
+ name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + query_parameters: { + common: false + description: "A list of URL query parameters to include in the log event. These will override any values included in the body with conflicting names." + name: "query_parameters" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["application", "source"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + heroku_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "heroku_logs" + #ExampleConfig: { + title: string + configuration: { + address: null + auth: null + query_parameters: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual event from a batch of events received through an HTTP POST request." + name: "line" + fields: { + app_name: { + description: "The app name field extracted from log message." + name: "app_name" + required: true + warnings: [] + type: { + string: { + examples: ["erlang"] + syntax: "literal" + } + } + } + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + message: { + description: "The message field, containing the plain text message." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["Hi from erlang"] + syntax: "literal" + } + } + } + proc_id: { + description: "The procid field extracted from log message." + name: "proc_id" + required: true + warnings: [] + type: { + string: { + examples: ["console"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + telemetry: { + metrics: { + request_read_errors_total: { + description: "The total number of request read errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "request_read_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `heroku_logs` source will augment events with helpful + context keys as shown in the "Output" section. 
+ """ + } + } + } + kafka: { + kind: "source" + title: "Kafka" + features: { + collect: { + checkpoint: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: false + can_verify_hostname: false + enabled_default: false + } + from: { + service: { + name: "Kafka" + thing: "Kafka topics" + url: "https://kafka.apache.org/" + versions: ">= 0.8" + description: "[Apache Kafka](https://kafka.apache.org/) is an open-source project for a distributed publish-subscribe messaging system rethought as a distributed commit log. Kafka stores messages in topics that are partitioned and replicated across multiple brokers in a cluster. Producers send messages to topics from which consumers read. These features make it an excellent candidate for durably storing logs and metrics data." + } + interface: { + socket: { + api: { + title: "Kafka protocol" + url: "https://kafka.apache.org/protocol" + } + direction: "incoming" + port: 9093 + protocols: ["tcp"] + ssl: "optional" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Kafka context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + tls_collect: "Securely collects data via Transport Layer Security (TLS)." + } + } + classes: { + commonly_used: true + deployment_roles: ["aggregator"] + delivery: "at_least_once" + development: "stable" + egress_method: "stream" + stateful: false + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + auto_offset_reset: { + common: false + description: "If offsets for consumer group do not exist, set them using this strategy. [librdkafka documentation][urls.librdkafka_config] for `auto.offset.reset` option for explanation." + name: "auto_offset_reset" + required: false + warnings: [] + type: { + string: { + default: "largest" + examples: ["smallest", "earliest", "beginning", "largest", "latest", "end", "error"] + syntax: "literal" + } + } + } + bootstrap_servers: { + description: "A comma-separated list of host and port pairs that are the addresses of the Kafka brokers in a \"bootstrap\" Kafka cluster that a Kafka client connects to initially to bootstrap itself." + name: "bootstrap_servers" + required: true + warnings: [] + type: { + string: { + examples: ["10.14.22.123:9092,10.14.23.332:9092"] + syntax: "literal" + } + } + } + commit_interval_ms: { + common: false + description: """ + The frequency that the consumer offsets are committed (written) to offset storage. + + """ + name: "commit_interval_ms" + required: false + warnings: [] + type: { + uint: { + default: 5000 + examples: [5000, 10000] + unit: "milliseconds" + } + } + } + fetch_wait_max_ms: { + common: false + description: """ + Maximum time the broker may wait to fill the response. + + """ + name: "fetch_wait_max_ms" + required: false + warnings: [] + type: { + uint: { + default: 100 + examples: [50, 100] + unit: "milliseconds" + } + } + } + group_id: { + description: """ + The consumer group name to be used to consume events from Kafka. 
+ + """ + name: "group_id" + required: true + warnings: [] + type: { + string: { + examples: ["consumer-group-name"] + syntax: "literal" + } + } + } + key_field: { + common: true + description: "The log field name to use for the Kafka message key. If unspecified, the key would not be added to the log event. If the message has null key, then this field would not be added to the log event." + name: "key_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["message_key"] + syntax: "literal" + } + } + } + topic_key: { + common: false + description: "The log field name to use for the Kafka topic. If unspecified, the key would not be added to the log event." + name: "topic_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["topic"] + syntax: "literal" + } + } + } + partition_key: { + common: false + description: "The log field name to use for the Kafka partition name. If unspecified, the key would not be added to the log event." + name: "partition_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["partition"] + syntax: "literal" + } + } + } + offset_key: { + common: false + description: "The log field name to use for the Kafka offset. If unspecified, the key would not be added to the log event." + name: "offset_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["offset"] + syntax: "literal" + } + } + } + librdkafka_options: { + common: false + category: "Librdkafka_options" + description: """ + Advanced options. See [librdkafka documentation](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) for details. + + """ + name: "librdkafka_options" + required: false + warnings: [] + type: { + object: { + examples: [{ + "client.id": "${ENV_VAR}" + "fetch.error.backoff.ms": "1000" + "socket.send.buffer.bytes": "100" + }] + options: {} + } + } + } + sasl: { + common: false + category: "Sasl" + description: "Options for SASL/SCRAM authentication support." + name: "sasl" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enable SASL/SCRAM authentication to the remote. (Not supported on Windows at this time.)" + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: null + } + } + } + mechanism: { + common: true + description: "The Kafka SASL/SCRAM mechanisms." + name: "mechanism" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["SCRAM-SHA-256", "SCRAM-SHA-512"] + syntax: "literal" + } + } + } + password: { + common: true + description: "The Kafka SASL/SCRAM authentication password." + name: "password" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["password"] + syntax: "literal" + } + } + } + username: { + common: true + description: "The Kafka SASL/SCRAM authentication username." + name: "username" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["username"] + syntax: "literal" + } + } + } + } + } + } + } + session_timeout_ms: { + common: false + description: """ + The Kafka session timeout in milliseconds. + + """ + name: "session_timeout_ms" + required: false + warnings: [] + type: { + uint: { + default: 10000 + examples: [5000, 10000] + unit: "milliseconds" + } + } + } + socket_timeout_ms: { + common: false + description: """ + Default timeout for network requests. 
+ + """ + name: "socket_timeout_ms" + required: false + warnings: [] + type: { + uint: { + default: 60000 + examples: [30000, 60000] + unit: "milliseconds" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + topics: { + description: """ + The Kafka topics names to read events from. Regex is supported if the topic begins with `^`. + + """ + name: "topics" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["^(prefix1|prefix2)-.+", "topic-1", "topic-2"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + kafka: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "kafka" + #ExampleConfig: { + title: string + configuration: { + auto_offset_reset: null + bootstrap_servers: null + commit_interval_ms: null + fetch_wait_max_ms: null + group_id: null + key_field: null + topic_key: null + partition_key: null + offset_key: null + librdkafka_options: null + sasl: null + session_timeout_ms: null + socket_timeout_ms: null + topics: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + record: { + description: "An individual Kafka record" + name: "record" + fields: { + message: { + description: "The raw line from the Kafka record." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308"] + syntax: "literal" + } + } + } + offset: { + description: "The Kafka offset at the time the record was retrieved." + name: "offset" + required: true + warnings: [] + type: { + uint: { + examples: [100] + unit: null + } + } + } + partition: { + description: "The Kafka partition that the record came from." + name: "partition" + required: true + warnings: [] + type: { + string: { + examples: ["partition"] + syntax: "literal" + } + } + } + timestamp: { + description: "The timestamp encoded in the Kafka message or the current time if it cannot be fetched." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + topic: { + description: "The Kafka topic that the record came from." + name: "topic" + required: true + warnings: [] + type: { + string: { + examples: ["topic"] + syntax: "literal" + } + } + } + } + } + } + } + telemetry: { + metrics: { + consumer_offset_updates_failed_total: { + description: "The total number of failures to update a Kafka consumer offset." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "consumer_offset_updates_failed_total" + } + events_failed_total: { + description: "The total number of failures to read a Kafka message." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_failed_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + } + } + how_it_works: { + librdkafka: { + #Subsection: { + title: string + body: string + } + name: "librdkafka" + title: "librdkafka" + body: """ + The `kafka` sink uses [`librdkafka`](https://github.com/edenhill/librdkafka) under the hood. This + is a battle-tested, high performance, and reliable library that facilitates + communication with Kafka. As Vector produces static MUSL builds, + this dependency is packaged with Vector, meaning you do not need to install it. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `kafka` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + } + http: { + kind: "source" + title: "HTTP" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["aggregator", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "HTTP" + thing: "an HTTP client" + url: "https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Client_request" + versions: null + } + interface: { + socket: { + direction: "incoming" + port: 80 + protocols: ["http"] + ssl: "optional" + } + } + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful HTTP context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to accept connections on. The address _must_ include a port." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:80", "localhost:80"] + syntax: "literal" + } + } + } + encoding: { + common: true + description: "The expected encoding of received data. Note that for `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields." + name: "encoding" + required: false + warnings: [] + type: { + string: { + default: "text" + enum: { + text: "Newline-delimited text, with each line forming a message." + ndjson: "Newline-delimited JSON objects, where each line must contain a JSON object." + json: "Array of JSON objects, which must be a JSON array containing JSON objects." + } + syntax: "literal" + } + } + } + headers: { + common: false + description: "A list of HTTP headers to include in the log event. 
These will override any values included in the JSON payload with conflicting names." + name: "headers" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["User-Agent", "X-My-Custom-Header"] + syntax: "literal" + } + } + } + } + } + } + auth: { + common: false + category: "Auth" + description: "Options for HTTP Basic Authentication." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + username: { + description: "The basic authentication user name." + name: "username" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + query_parameters: { + common: false + description: "A list of URL query parameters to include in the log event. These will override any values included in the body with conflicting names." 
+ name: "query_parameters" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["application", "source"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + http: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "http" + #ExampleConfig: { + title: string + configuration: { + address: null + encoding: null + headers: null + auth: null + query_parameters: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + text: { + description: "An individual line from a `text/plain` request" + name: "text" + fields: { + message: { + description: "The raw line line from the incoming payload." + name: "message" + relevant_when: "encoding == \"text\"" + required: true + warnings: [] + type: { + string: { + examples: ["Hello world"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + structured: { + description: "An individual line from a `application/json` request" + name: "structured" + fields: { + "*": { + common: false + description: "Any field contained in your JSON payload" + name: "*" + relevant_when: "encoding != \"text\"" + required: false + warnings: [] + type: { + "*": {} + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + examples: [{ + title: "text/plain" + configuration: { + address: "0.0.0.0:80" + encoding: "text" + auth: null + query_parameters: null + type: null + headers: ["User-Agent"] + tls: null + } + input: """ + ```http + Content-Type: text/plain + User-Agent: my-service/v2.1 + X-Forwarded-For: my-host.local + + Hello world + ``` + """ + output: [{ + log: { + host: "my-host.local" + message: "Hello world" + timestamp: "2020-10-10T17:07:36.452332Z" + "User-Agent": "my-service/v2.1" + } + }] + }, { + title: "application/json" + configuration: { + address: "0.0.0.0:80" + encoding: "json" + auth: null + query_parameters: null + type: null + headers: ["User-Agent"] + tls: null + } + input: """ + ```http + Content-Type: application/json + User-Agent: my-service/v2.1 + X-Forwarded-For: my-host.local + + {"key": "val"} + ``` + """ + output: [{ + log: { + host: "my-host.local" + key: "val" + timestamp: "2020-10-10T17:07:36.452332Z" + "User-Agent": "my-service/v2.1" + } + }] + }] + telemetry: { + metrics: { + http_bad_requests_total: { + description: "The total number of HTTP `400 Bad Request` errors encountered." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_bad_requests_total" + } + events_out_total: { + description: "The total number of events emitted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + } + } + how_it_works: { + decompression: { + #Subsection: { + title: string + body: string + } + name: "decompression" + title: "Decompression" + body: """ + Received body is decompressed according to `Content-Encoding` header. + Supported algorithms are `gzip`, `deflate`, and `snappy`. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `http` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + } + nginx_metrics: { + kind: "source" + title: "Nginx Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Nginx" + thing: "an Nginx server" + url: "https://www.nginx.com/" + versions: null + description: "[Nginx][urls.nginx] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server." + } + interface: { + socket: { + api: { + title: "Nginx ngx_http_stub_status_module module" + url: "http://nginx.org/en/docs/http/ngx_http_stub_status_module.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Nginx context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." 
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: ["The `ngx_http_stub_status_module` module must be enabled."] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + endpoints: { + description: "HTTP/HTTPS endpoint of the Nginx server with the `ngx_http_stub_status_module` module enabled." + name: "endpoints" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["http://localhost:8000/basic_status"] + syntax: "literal" + } + } + } + } + } + } + scrape_interval_secs: { + description: "The interval between scrapes." + common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + namespace: { + description: "The namespace of metrics. Disabled if empty." + common: false + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "nginx" + syntax: "literal" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for outgoing connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." 
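Taken together, the options above describe a source that could be configured roughly as follows. This is a sketch expressed as CUE data for consistency with the rest of this document (Vector itself is typically configured in TOML); the source name `nginx` and the endpoint URL are illustrative:

```cue
sources: nginx: {
	type: "nginx_metrics"
	endpoints: ["http://localhost:8000/basic_status"]
	scrape_interval_secs: 15      // the default
	namespace:            "nginx" // the default
}
```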
+ name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + auth: { + common: false + category: "Auth" + description: "Configures the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." + name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + bearer: "The bearer token authentication strategy." + } + examples: ["basic", "bearer"] + syntax: "literal" + } + } + } + token: { + description: "The token to use for bearer authentication" + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${API_TOKEN}", "xyz123"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." + name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + nginx_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "nginx_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoints: null + scrape_interval_secs: null + namespace: null + tls: null + auth: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + how_it_works: { + mod_status: { + #Subsection: { + title: string + body: string + } + name: "mod_status" + title: "Module `ngx_http_stub_status_module`" + body: """ + The [ngx_http_stub_status_module][urls.nginx_stub_status_module] + module provides access to basic status information. Basic status + information is a simple web page with text data. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `nginx_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + collect_completed_total: { + description: "The total number of metrics collections completed for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "collect_completed_total" + } + collect_duration_nanoseconds: { + description: "The duration spent collecting metrics for this component." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_duration_nanoseconds" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + } + } + output: { + metrics: { + up: { + description: "Whether the Nginx server is up." + type: "gauge" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "up" + } + connections_active: { + description: "The current number of active client connections including `Waiting` connections." + type: "gauge" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." 
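Every nginx metric below repeats the same `endpoint` and `host` tags. In CUE that duplication can be factored out with a shared struct and a pattern constraint; a hypothetical sketch of how such factoring might look (Vector's actual sources may organize this differently):

```cue
// Hypothetical shared tag set for all nginx metrics.
_nginxTags: {
	endpoint: {
		name:        "endpoint"
		description: "Nginx endpoint."
		required:    true
		examples: ["http://localhost:8000/basic_status"]
	}
	host: {
		name:        "host"
		description: "The hostname of the Nginx server."
		required:    true
		examples: ["my-host.local"]
	}
}

// Each metric picks up the shared tags via unification.
metrics: [Name=string]: {
	name: Name
	tags: _nginxTags
}

metrics: connections_active: {
	description: "The current number of active client connections including `Waiting` connections."
	type:        "gauge"
}
```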
+ required: true + examples: ["my-host.local"] + } + } + name: "connections_active" + } + connections_accepted_total: { + description: "The total number of accepted client connections." + type: "counter" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "connections_accepted_total" + } + connections_handled_total: { + description: "The total number of handled connections. Generally, the parameter value is the same as `accepts` unless some resource limits have been reached (for example, the `worker_connections` limit)." + type: "counter" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "connections_handled_total" + } + http_requests_total: { + description: "The total number of client requests." + type: "counter" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "http_requests_total" + } + connections_reading: { + description: "The current number of connections where nginx is reading the request header." + type: "gauge" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "connections_reading" + } + connections_writing: { + description: "The current number of connections where nginx is writing the response back to the client." + type: "gauge" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "connections_writing" + } + connections_waiting: { + description: "The current number of idle client connections waiting for a request." + type: "gauge" + default_namespace: "nginx" + tags: { + endpoint: { + name: "endpoint" + description: "Nginx endpoint." + required: true + examples: ["http://localhost:8000/basic_status"] + } + host: { + name: "host" + description: "The hostname of the Nginx server." + required: true + examples: ["my-host.local"] + } + } + name: "connections_waiting" + } + } + } + } + docker_logs: { + kind: "source" + title: "Docker" + alias: "docker" + classes: { + commonly_used: false + delivery: "best_effort" + deployment_roles: ["daemon"] + development: "stable" + egress_method: "stream" + stateful: false + } + env_vars: { + DOCKER_HOST: { + description: "The Docker host to connect to when `docker_host` configuration is absent." 
+ name: "DOCKER_HOST" + common: true + type: { + string: { + default: null + examples: ["unix:///var/run/docker.sock"] + syntax: "literal" + } + } + required: false + warnings: [] + } + DOCKER_CERT_PATH: { + description: """ + Path to look for TLS certificates when `tls` configuration is absent. + Vector will use: + - `$DOCKER_CERT_PATH/ca.pem`: CA certificate. + - `$DOCKER_CERT_PATH/cert.pem`: TLS certificate. + - `$DOCKER_CERT_PATH/key.pem`: TLS key. + """ + name: "DOCKER_CERT_PATH" + common: true + type: { + string: { + default: null + examples: ["certs/"] + syntax: "literal" + } + } + required: false + warnings: [] + } + DOCKER_CONFIG: { + description: "Path to look for TLS certificates when both `tls` configuration and `DOCKER_CERT_PATH` are absent." + name: "DOCKER_CONFIG" + common: true + type: { + string: { + default: null + examples: ["certs/"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Docker" + thing: "the Docker platform" + url: "https://www.docker.com/" + versions: ">= 1.24" + setup: [{ + title: "Install Docker" + description: "Install Docker by following the Docker setup tutorial." + detour: { + url: "https://docs.docker.com/get-docker/" + } + }, { + title: "Verify Docker logs" + description: """ + Ensure that the Docker Engine is properly exposing logs: + + ```bash + docker logs $(docker ps | awk '{ print $1 }') + ``` + + If you receive an error it's likely that you do not have the proper Docker + logging drivers installed. The Docker Engine requires the [`json-file`](https://docs.docker.com/config/containers/logging/json-file/) (default), + [`journald`](docker_logging_driver_journald), or [`local`](https://docs.docker.com/config/containers/logging/local/) Docker + logging drivers to be installed. + """ + }] + } + interface: { + socket: { + api: { + title: "Docker Engine API" + url: "https://docs.docker.com/engine/api/" + } + direction: "outgoing" + permissions: { + unix: { + group: "docker" + } + } + protocols: ["http"] + socket: "/var/run/docker.sock" + ssl: "disabled" + } + } + } + } + multiline: { + enabled: true + } + descriptions: { + collect_context: "Enriches data with useful Docker context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + multiline: "Merges multi-line logs into one event." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + "x86_64-apple-darwin": true + } + requirements: [] + warnings: [""" + Collecting logs directly from the Docker Engine is known to have + performance problems for very large setups. If you have a large + setup, please consider alternative collection methods, such as the + Docker [`syslog`](https://docs.docker.com/config/containers/logging/syslog/) or + [Docker `journald` driver](https://docs.docker.com/config/containers/logging/journald/) + drivers. + """, """ + To avoid collecting logs from itself when deployed as a container, + the Docker source uses current hostname to find out which container + it is inside. If a container's ID matches the hostname, that container + will be excluded. 
+ If you change the container's hostname, consider manually excluding the Vector + container using [`exclude_containers`](#exclude_containers). + """] + notices: [] + } + installation: { + platform_name: "docker" + } + configuration: { + docker_host: { + common: true + description: """ + The Docker host to connect to. Use an HTTPS URL to enable TLS encryption. + If absent, Vector will try to use the `DOCKER_HOST` environment variable. + If `DOCKER_HOST` is also absent, Vector will use the default Docker local socket + (`/var/run/docker.sock` on Unix platforms, `//./pipe/docker_engine` on Windows). + """ + name: "docker_host" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["http://localhost:2375", "https://localhost:2376", "unix:///var/run/docker.sock", "npipe:////./pipe/docker_engine", "/var/run/docker.sock", "//./pipe/docker_engine"] + syntax: "literal" + } + } + } + tls: { + common: false + category: "Tls" + description: """ + TLS options to connect to the Docker daemon. This has no effect unless `docker_host` is an HTTPS URL. + If absent, Vector will try to use the `DOCKER_CERT_PATH` environment variable and then `DOCKER_CONFIG`. + If both environment variables are absent, Vector will try to read certificates in `~/.docker/`. + """ + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + description: "Path to CA certificate file." + name: "ca_file" + required: true + warnings: [] + type: { + string: { + examples: ["certs/ca.pem"] + syntax: "literal" + } + } + } + crt_file: { + description: "Path to TLS certificate file." + name: "crt_file" + required: true + warnings: [] + type: { + string: { + examples: ["certs/cert.pem"] + syntax: "literal" + } + } + } + key_file: { + description: "Path to TLS key file." + name: "key_file" + required: true + warnings: [] + type: { + string: { + examples: ["certs/key.pem"] + syntax: "literal" + } + } + } + } + } + } + } + auto_partial_merge: { + common: false + description: """ + Setting this to `false` will disable the automatic merging + of partial events. + """ + name: "auto_partial_merge" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + exclude_containers: { + common: false + description: """ + A list of container IDs _or_ names to match against for + containers you don't want to collect logs from. Prefix matches + are supported, so you can supply just the first few characters + of the ID or name of containers you want to exclude. This can be + used in conjunction with + [`include_containers`](#include_containers). + """ + name: "exclude_containers" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["exclude_", "exclude_me_0", "ad08cc418cf9"] + syntax: "literal" + } + } + } + } + } + } + include_containers: { + common: true + description: """ + A list of container IDs _or_ names to match against for + containers you want to collect logs from. Prefix matches are + supported, so you can supply just the first few characters of + the ID or name of containers you want to include. This can be + used in conjunction with + [`exclude_containers`](#exclude_containers). 
+ """ + name: "include_containers" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["include_", "include_me_0", "ad08cc418cf9"] + syntax: "literal" + } + } + } + } + } + } + include_labels: { + common: true + description: """ + A list of container object labels to match against when + filtering running containers. This should follow the + described label's syntax in [docker object labels docs](https://docs.docker.com/config/labels-custom-metadata/). + """ + name: "include_labels" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["com.example.vendor=Timber Inc.", "com.example.name=Vector"] + syntax: "literal" + } + } + } + } + } + } + include_images: { + common: true + description: """ + A list of image names to match against. If not provided, all + images will be included. + """ + name: "include_images" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["httpd", "redis"] + syntax: "literal" + } + } + } + } + } + } + retry_backoff_secs: { + common: false + description: "The amount of time to wait before retrying after an error." + name: "retry_backoff_secs" + required: false + warnings: [] + type: { + uint: { + unit: "seconds" + default: 1 + } + } + } + multiline: { + common: false + category: "Multiline" + description: "Multiline parsing configuration. If not specified, multiline parsing is disabled." + name: "multiline" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + condition_pattern: { + description: "Condition regex pattern to look for. Exact behavior is configured via `mode`." + name: "condition_pattern" + required: true + warnings: [] + sort: 3 + type: { + string: { + examples: ["^[\\s]+", "\\\\$", "^(INFO|ERROR) ", ";$"] + syntax: "regex" + } + } + } + mode: { + description: "Mode of operation, specifies how the `condition_pattern` is interpreted." + name: "mode" + required: true + warnings: [] + sort: 2 + type: { + string: { + enum: { + continue_through: "All consecutive lines matching this pattern are included in the group. The first line (the line that matched the start pattern) does not need to match the `ContinueThrough` pattern. This is useful in cases such as a Java stack trace, where some indicator in the line (such as leading whitespace) indicates that it is an extension of the preceding line." + continue_past: "All consecutive lines matching this pattern, plus one additional line, are included in the group. This is useful in cases where a log message ends with a continuation marker, such as a backslash, indicating that the following line is part of the same message." + halt_before: "All consecutive lines not matching this pattern are included in the group. This is useful where a log line contains a marker indicating that it begins a new message." + halt_with: "All consecutive lines, up to and including the first line matching this pattern, are included in the group. This is useful where a log line ends with a termination marker, such as a semicolon." + } + examples: ["continue_through", "continue_past", "halt_before", "halt_with"] + syntax: "literal" + } + } + } + start_pattern: { + description: "Start regex pattern to look for as a beginning of the message." 
+ name: "start_pattern" + required: true + warnings: [] + sort: 1 + type: { + string: { + examples: ["^[^\\s]", "\\\\$", "^(INFO|ERROR) ", "[^;]$"] + syntax: "regex" + } + } + } + timeout_ms: { + description: "The maximum time to wait for the continuation. Once this timeout is reached, the buffered message is guaranteed to be flushed, even if incomplete." + name: "timeout_ms" + required: true + warnings: [] + sort: 4 + type: { + uint: { + examples: [1_000, 600_000] + unit: "milliseconds" + } + } + } + } + } + } + } + host_key: { + category: "Context" + common: false + description: "The key name added to each event representing the current host. This can also be globally set via the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + docker_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + type: "docker_logs" + #ExampleConfig: { + title: string + configuration: { + docker_host: null + tls: null + auto_partial_merge: null + exclude_containers: null + include_containers: null + include_labels: null + include_images: null + retry_backoff_secs: null + host_key: null + type: null + multiline: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + log: { + description: "A Docker log event" + name: "log" + fields: { + container_created_at: { + description: "A UTC timestamp representing when the container was created." + name: "container_created_at" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + container_id: { + description: "The Docker container ID that the log was collected from." + name: "container_id" + required: true + warnings: [] + type: { + string: { + examples: ["9b6247364a03", "715ebfcee040"] + syntax: "literal" + } + } + } + container_name: { + description: "The Docker container name that the log was collected from." + name: "container_name" + required: true + warnings: [] + type: { + string: { + examples: ["evil_ptolemy", "nostalgic_stallman"] + syntax: "literal" + } + } + } + image: { + description: "The image name that the container is based on." + name: "image" + required: true + warnings: [] + type: { + string: { + examples: ["ubuntu:latest", "busybox", "timberio/vector:latest-alpine"] + syntax: "literal" + } + } + } + message: { + description: "The raw log message." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["Started GET / for 127.0.0.1 at 2012-03-10 14:28:14 +0100"] + syntax: "literal" + } + } + } + stream: { + description: "The [standard stream](https://en.wikipedia.org/wiki/Standard_streams) that the log was collected from." + name: "stream" + required: true + warnings: [] + type: { + string: { + enum: { + stdout: "The STDOUT stream" + stderr: "The STDERR stream" + } + examples: ["stdout", "stderr"] + syntax: "literal" + } + } + } + timestamp: { + description: "The UTC timestamp extracted from the Docker log event." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + host: { + description: "The local hostname, equivalent to the `gethostname` command." 
+ name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + "*": { + description: "Each container label is inserted with it's exact key/value pair." + name: "*" + required: true + warnings: [] + type: { + string: { + examples: ["Started GET / for 127.0.0.1 at 2012-03-10 14:28:14 +0100"] + syntax: "literal" + } + } + } + } + } + } + } + examples: [{ + title: "Dummy Logs" + configuration: { + include_images: ["mingrammer/flog"] + docker_host: null + tls: null + auto_partial_merge: null + exclude_containers: null + include_containers: null + include_labels: null + retry_backoff_secs: null + host_key: null + type: null + multiline: null + } + input: """ + ```json + { + "stream": "stdout", + "message": "150.75.72.205 - - [03/Oct/2020:16:11:29 +0000] "HEAD /initiatives HTTP/1.1" 504 117" + } + ``` + """ + output: { + log: { + container_created_at: "2020-10-03T16:11:29.443232Z" + container_id: "fecc98177eca7fb75a2b2186c418bf9a0cd3a05a1169f2e2293bf8987a9d96ab" + container_name: "flog" + image: "mingrammer/flog" + message: "150.75.72.205 - - [03/Oct/2020:16:11:29 +0000] \"HEAD /initiatives HTTP/1.1\" 504 117" + stream: "stdout" + host: "my-host.local" + } + } + }] + how_it_works: { + message_merging: { + #Subsection: { + title: string + body: string + } + name: "message_merging" + title: "Merging Split Messages" + body: """ + Docker, by default, will split log messages that exceed 16kb. This can be a + rather frustrating problem because it produces malformed log messages that are + difficult to work with. Vector's solves this by default, automatically merging + these messages into a single message. You can turn this off via the + `auto_partial_merge` option. Furthermore, you can adjust the marker + that we use to determine if an event is partial via the + `partial_event_marker_field` option. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `docker_logs` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + communication_errors_total: { + description: "The total number of errors stemming from communication with the Docker daemon." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "communication_errors_total" + } + container_metadata_fetch_errors_total: { + description: "The total number of errors encountered when fetching container metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "container_metadata_fetch_errors_total" + } + container_processed_events_total: { + description: "The total number of container events processed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "container_processed_events_total" + } + containers_unwatched_total: { + description: "The total number of times Vector stopped watching for container logs." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "containers_unwatched_total" + } + containers_watched_total: { + description: "The total number of times Vector started watching for container logs." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "containers_watched_total" + } + logging_driver_errors_total: { + description: """ + The total number of logging driver errors encountered caused by not using either + the `jsonfile` or `journald` driver. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "logging_driver_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error." + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + } + } + } + splunk_hec: { + kind: "source" + title: "Splunk HTTP Event Collector (HEC)" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["aggregator"] + development: "stable" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "Splunk" + thing: "a Splunk database" + url: "https://www.splunk.com" + versions: null + } + interface: { + socket: { + api: { + title: "Splunk HEC" + url: "https://docs.splunk.com/Documentation/Splunk/8.0.0/Data/HECRESTendpoints" + } + direction: "incoming" + port: 8080 + protocols: ["http"] + ssl: "optional" + } + } + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful Splunk context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + common: true + description: "The address to accept connections on." + name: "address" + required: false + warnings: [] + type: { + string: { + default: "0.0.0.0:8080" + syntax: "literal" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." 
+ name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + token: { + common: true + description: "If supplied, incoming requests must supply this token in the `Authorization` header, just as a client would if it was communicating with the Splunk HEC endpoint directly. If _not_ supplied, the `Authorization` header will be ignored and requests will not be authenticated." + name: "token" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["A94A8FE5CCB19BA61C4C08"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + splunk_hec: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "splunk_hec" + #ExampleConfig: { + title: string + configuration: { + address: null + token: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + event: { + description: "A single event" + name: "event" + fields: { + message: { + description: "The raw line, unparsed." 
+ name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["2019-02-13T19:48:34+00:00 [info] Started GET \"/\" for 127.0.0.1"] + syntax: "literal" + } + } + } + splunk_channel: { + description: "The Splunk channel, value of the `X-Splunk-Request-Channel` header." + name: "splunk_channel" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + telemetry: { + metrics: { + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." 
+ } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `splunk_hec` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + } + apache_metrics: { + kind: "source" + title: "Apache HTTP Server (HTTPD) Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Apache HTTP server (HTTPD)" + thing: "an Apache HTTP server (HTTPD)" + url: "https://httpd.apache.org" + versions: null + setup: [{ + title: "Install Apache HTTP" + description: "Install Apache HTTP by following their installation instructions." + detour: { + url: "https://httpd.apache.org/docs/current/install.html" + } + }] + connect_to: { + vector: { + metrics: { + setup: [{ + title: "Enable the Apache Status Module" + description: """ + Enable the [Apache Status module](http://httpd.apache.org/docs/current/mod/mod_status.html) + in your Apache config. + + ```text file="/etc/apache2/httpd.conf" + # ... + + <Location /server-status> + SetHandler server-status + Require host example.com + </Location> + + # ... + ``` + """ + }, { + title: "Optionally enable ExtendedStatus" + description: """ + Optionally enable the [`ExtendedStatus` option](https://httpd.apache.org/docs/current/mod/core.html#extendedstatus) + for more detailed metrics. + + ```text file="/etc/apache2/httpd.conf" + # ... + + ExtendedStatus On + + # ... + ``` + """ + notes: ["This defaults to `On` in Apache >= 2.3.6."] + }, { + title: "Apply the Apache config changes" + description: "Start or reload Apache to apply the config changes." + }] + } + } + } + } + interface: { + socket: { + api: { + title: "Apache HTTP Server Status Module" + url: "http://httpd.apache.org/docs/current/mod/mod_status.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "disabled" + } + } + } + } + descriptions: { + collect_context: "Enriches data with useful Apache HTTP server (HTTPD) context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: ["The [Apache Status module](http://httpd.apache.org/docs/current/mod/mod_status.html) must be enabled."] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + endpoints: { + description: "mod_status endpoints to scrape metrics from." + name: "endpoints" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["http://localhost:8080/server-status/?auto"] + syntax: "literal" + } + } + } + } + } + } + scrape_interval_secs: { + description: "The interval between scrapes." 
+ common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + namespace: { + description: "The namespace of the metric. Disabled if empty." + name: "namespace" + required: false + common: false + warnings: [] + type: { + string: { + default: "apache" + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + apache_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "apache_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoints: null + scrape_interval_secs: null + namespace: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + access_total: { + description: "The total number of times the Apache server has been accessed." + relevant_when: "`ExtendedStatus On`" + type: "counter" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "access_total" + } + connections: { + description: "The current number of connections to the Apache server, by state." + type: "gauge" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + state: { + name: "state" + description: "The state of the connection." + required: true + examples: ["closing", "keepalive", "total", "writing"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "connections" + } + cpu_load: { + description: "The current CPU load of the Apache server." + relevant_when: "`ExtendedStatus On`" + type: "gauge" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "cpu_load" + } + cpu_seconds_total: { + description: "The CPU time of various Apache processes." + relevant_when: "`ExtendedStatus On`" + type: "counter" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + state: { + name: "state" + description: "The CPU time category." + required: true + examples: ["children_system", "children_user", "system", "user"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "cpu_seconds_total" + } + duration_seconds_total: { + description: "The total duration of requests served by the Apache server." + relevant_when: "`ExtendedStatus On`" + type: "counter" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file."
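+// Editor's sketch (hypothetical; not `cue eval` output): the apache_metrics configuration options documented above reduce to roughly this CUE shape, using the defaults shown in the data. #ApacheMetricsConfig is an assumed name.
+#ApacheMetricsConfig: {
+    type: "apache_metrics"
+    endpoints: [...string]
+    scrape_interval_secs: uint | *15
+    namespace:            string | *"apache"
+}
+// For example, this unifies successfully and picks up both defaults:
+apacheExample: #ApacheMetricsConfig & {
+    endpoints: ["http://localhost:8080/server-status/?auto"]
+}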
+ required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "duration_seconds_total" + } + scoreboard: { + description: "The number of times various Apache server tasks have been run." + type: "gauge" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + state: { + name: "state" + description: "The connection state." + required: true + examples: ["closing", "dnslookup", "finishing", "idle_cleanup", "keepalive", "logging", "open", "reading", "sending", "starting", "waiting"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "scoreboard" + } + sent_bytes_total: { + description: "The number of bytes sent by the Apache server." + relevant_when: "`ExtendedStatus On`" + type: "counter" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "sent_bytes_total" + } + up: { + description: "Whether the Apache server is up." + type: "gauge" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "up" + } + uptime_seconds_total: { + description: "The amount of time the Apache server has been running." + type: "counter" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "uptime_seconds_total" + } + workers: { + description: "Apache worker statuses." + type: "gauge" + default_namespace: "apache" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["http://localhost:8080/server-status?auto"] + } + state: { + name: "state" + description: "The state of the worker." + required: true + examples: ["busy", "idle"] + } + host: { + name: "host" + description: "The hostname of the Apache HTTP server." + required: true + examples: ["my-host.local"] + } + } + name: "workers" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `apache_metrics` source will augment events with helpful + context keys as shown in the "Output" section.
+ """ + } + } + telemetry: { + metrics: { + http_error_response_total: { + description: "The total number of HTTP error responses for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_error_response_total" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + requests_completed_total: { + description: "The total number of requests completed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_completed_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + request_duration_nanoseconds: { + description: "The total request duration in nanoseconds." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_duration_nanoseconds" + } + } + } + } + syslog: { + kind: "source" + title: "Syslog" + classes: { + commonly_used: true + delivery: "best_effort" + deployment_roles: ["aggregator", "sidecar"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "Syslog" + thing: "Syslog" + url: "https://en.wikipedia.org/wiki/Syslog" + versions: null + description: "[Syslog](https://en.wikipedia.org/wiki/Syslog) stands for System Logging Protocol and is a standard protocol used to send system log or event messages to a specific server, called a syslog server. It is used to collect various device logs from different machines and send them to a central location for monitoring and review." 
+ } + interface: { + socket: { + api: { + title: "Syslog" + url: "https://en.wikipedia.org/wiki/Syslog" + } + direction: "incoming" + port: 514 + protocols: ["tcp", "unix", "udp"] + ssl: "optional" + } + } + } + receive_buffer_bytes: { + enabled: true + relevant_when: "mode = `tcp` or mode = `udp` && os = `unix`" + } + keepalive: { + enabled: true + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful Syslog context." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to listen for connections on, or `systemd#N` to use the Nth socket passed by systemd socket activation. If an address is used it _must_ include a port." + name: "address" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:9000", "systemd", "systemd#3"] + syntax: "literal" + } + } + } + host_key: { + category: "Context" + common: false + description: "The key name added to each event representing the current host. This can also be globally set via the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + max_length: { + common: true + description: "The maximum byte size of incoming messages before they are discarded." + name: "max_length" + required: false + warnings: [] + type: { + uint: { + default: 102400 + unit: "bytes" + } + } + } + mode: { + description: "The type of socket to use." + name: "mode" + required: true + warnings: [] + type: { + string: { + enum: { + tcp: "TCP socket." + udp: "UDP socket." + unix_datagram: "Unix domain datagram socket." + unix_stream: "Unix domain stream socket." + } + examples: ["tcp", "udp", "unix_datagram", "unix_stream"] + syntax: "literal" + } + } + } + path: { + description: "The unix socket path. *This should be an absolute path*." + name: "path" + relevant_when: "mode = `unix`" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/socket"] + syntax: "literal" + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the source." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + category: "Tls" + common: false + description: "Configures the TLS options for incoming connections."
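+// Editor's sketch (hypothetical; not `cue eval` output): the syslog source options documented above imply roughly this CUE shape. #SyslogConfig is an assumed name, only the options shown so far are modeled, and `address` is required only for the tcp and udp modes, matching its relevant_when above.
+#SyslogConfig: {
+    type: "syslog"
+    mode: "tcp" | "udp" | "unix_datagram" | "unix_stream"
+    if mode == "tcp" || mode == "udp" {
+        address: string
+    }
+    host_key:   string | *"host"
+    max_length: uint | *102400
+}
+syslogExample: #SyslogConfig & {
+    mode:    "tcp"
+    address: "0.0.0.0:9000"
+}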
+ name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + shutdown_timeout_secs: { + common: false + description: "The timeout before a connection is forcefully closed during shutdown." + name: "shutdown_timeout_secs" + relevant_when: "mode = `tcp``" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + syslog: "The type of this component." + socket: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "syslog" + #ExampleConfig: { + title: string + configuration: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual Syslog event" + name: "line" + fields: { + appname: { + description: "The appname extracted from the Syslog formatted line. If a appname is not found, then the key will not be added." 
+ name: "appname" + required: true + warnings: [] + type: { + string: { + examples: ["app-name"] + syntax: "literal" + } + } + } + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + hostname: { + description: "The hostname extracted from the Syslog line. (`host` is also this value if it exists in the log.)" + name: "hostname" + required: true + warnings: [] + type: { + string: { + examples: ["my.host.com"] + syntax: "literal" + } + } + } + facility: { + description: "The facility extracted from the Syslog line. If a facility is not found, then the key will not be added." + name: "facility" + required: true + warnings: [] + type: { + string: { + examples: ["1"] + syntax: "literal" + } + } + } + message: { + description: "The message extracted from the Syslog line." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["Hello world"] + syntax: "literal" + } + } + } + msgid: { + description: "The msgid extracted from the Syslog line. If a msgid is not found, then the key will not be added." + name: "msgid" + required: true + warnings: [] + type: { + string: { + examples: ["ID47"] + syntax: "literal" + } + } + } + procid: { + description: "The procid extracted from the Syslog line. If a procid is not found, then the key will not be added." + name: "procid" + required: true + warnings: [] + type: { + string: { + examples: ["8710"] + syntax: "literal" + } + } + } + severity: { + description: "The severity extracted from the Syslog line. If a severity is not found, then the key will not be added." + name: "severity" + required: true + warnings: [] + type: { + string: { + examples: ["notice"] + syntax: "literal" + } + } + } + source_ip: { + description: "The upstream hostname. In the case where `mode` = `\"unix\"` the socket path will be used. (`host` is also this value if `hostname` does not exist in the log.)" + name: "source_ip" + required: true + warnings: [] + type: { + string: { + examples: ["127.0.0.1"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + version: { + description: "The version extracted from the Syslog line. If a version is not found, then the key will not be added." + name: "version" + required: true + warnings: [] + type: { + uint: { + examples: [1] + unit: null + } + } + } + "*": { + description: "In addition to the defined fields, any Syslog 5424 structured fields are parsed and inserted as root level fields." + name: "*" + required: true + warnings: [] + type: { + string: { + examples: ["hello world"] + syntax: "literal" + } + } + } + } + } + } + } + examples: [{ + title: "Syslog Eve" + configuration: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + input: """ + ```text + <13>1 2020-03-13T20:45:38.119Z dynamicwireless.name non 2426 ID931 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] Try to override the THX port, maybe it will reboot the neural interface! 
+ ``` + """ + output: { + log: { + severity: "notice" + facility: "user" + timestamp: "2020-03-13T20:45:38.119Z" + host: "my-host.local" + source_ip: "34.33.222.212" + hostname: "dynamicwireless.name" + appname: "non" + procid: "2426" + msgid: "ID931" + iut: "3" + eventSource: "Application" + eventID: "1011" + message: "Try to override the THX port, maybe it will reboot the neural interface!" + } + } + }] + how_it_works: { + line_delimiters: { + #Subsection: { + title: string + body: string + } + name: "line_delimiters" + title: "Line Delimiters" + body: "Each line is read until a new line delimiter, the `0xA` byte, is found." + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + parsing: { + #Subsection: { + title: string + body: string + } + name: "parsing" + title: "Parsing" + body: """ + Vector makes a _best effort_ to parse the various Syslog formats out in the + wild. This includes [RFC 6587][urls.syslog_6587], [RFC 5424][urls.syslog_5424], + [RFC 3164][urls.syslog_3164], and other common variations (such as the Nginx + Syslog style). It's unfortunate that the Syslog specification is not more + accurately followed, but we hope Vector insulates you from these deviations. + + If parsing fails, Vector will include the entire Syslog line in the `message` + key. If you find this happening often, we recommend using the + [`socket` source][docs.sources.socket] combined with the + [`regex_parser` transform][docs.transforms.regex_parser] to implement your own + ingestion and parsing scheme. Or, [open an issue](https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature) + requesting support for your specific format. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `syslog` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + connection_read_errors_total: { + description: "The total number of errors reading datagram." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + mode: { + name: "mode" + description: "" + required: true + enum: { + udp: "User Datagram Protocol" + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "connection_read_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + utf8_convert_errors_total: { + description: "The total number of errors converting bytes to a UTF-8 string in UDP mode." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + mode: { + name: "mode" + description: "The connection mode used by the component." + required: true + enum: { + udp: "User Datagram Protocol" + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "utf8_convert_errors_total" + } + } + } + } + mongodb_metrics: { + kind: "source" + title: "MongoDB Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "MongoDB" + thing: "an MongoDB instance" + url: "https://www.mongodb.com" + versions: null + description: "[MongoDB][urls.mongodb] is a general purpose, document-based, distributed database built for modern application developers and for the cloud era." + } + interface: { + socket: { + api: { + title: "MongoDB serverStatus command" + url: "https://docs.mongodb.com/manual/reference/command/serverStatus/" + } + direction: "outgoing" + protocols: ["tcp"] + ssl: "optional" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful MongoDB context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [""" + User from endpoint should have enough privileges for running + [serverStatus][urls.mongodb_command_server_status] command. + """] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + endpoints: { + description: "MongoDB [Connection String URI Format][urls.mongodb_connection_string_uri_format]" + name: "endpoints" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["mongodb://localhost:27017"] + syntax: "literal" + } + } + } + } + } + } + scrape_interval_secs: { + description: "The interval between scrapes." + common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + namespace: { + description: "The namespace of metrics. Disabled if empty." + common: false + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "mongodb" + syntax: "literal" + } + } + } + type: { + description: "The component type. 
This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + mongodb_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "mongodb_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoints: null + scrape_interval_secs: null + namespace: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + how_it_works: { + mod_status: { + #Subsection: { + title: string + body: string + } + name: "mod_status" + title: "MongoDB `serverStatus` command" + body: """ + The [serverStatus][urls.mongodb_command_server_status] command + returns a document that provides an overview of the database’s + state. The output fields vary depending on the version of + MongoDB, underlying operating system platform, the storage + engine, and the kind of node, including `mongos`, `mongod` or + `replica set` member. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `mongodb_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + collect_completed_total: { + description: "The total number of metrics collections completed for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_completed_total" + } + collect_duration_nanoseconds: { + description: "The duration spent collecting of metrics for this component." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_duration_nanoseconds" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
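+// Editor's sketch (hypothetical; not `cue eval` output): the mongodb_metrics options above, as a CUE shape with the documented defaults. #MongoDBMetricsConfig is an assumed name.
+#MongoDBMetricsConfig: {
+    type: "mongodb_metrics"
+    endpoints: [...string]
+    scrape_interval_secs: uint | *15
+    namespace:            string | *"mongodb"
+}
+mongodbExample: #MongoDBMetricsConfig & {
+    endpoints: ["mongodb://localhost:27017"]
+}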
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + request_errors_total: { + description: "The total number of request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_errors_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + } + } + output: { + metrics: { + assets_total: { + description: "Number of assertions raised since the MongoDB process started." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "The assertion type." + required: true + examples: ["regular", "warning", "msg", "user", "rollovers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "assets_total" + } + bson_parse_error_total: { + description: "The total number of BSON parsing errors." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "bson_parse_error_total" + } + connections: { + description: "The number of connections in each state." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + state: { + name: "state" + description: "The connection state." + required: true + examples: ["active", "available", "current"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "connections" + } + extra_info_heap_usage_bytes: { + description: "The total size in bytes of heap space used by the database process." + relevant_when: "Unix/Linux" + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "extra_info_heap_usage_bytes" + } + extra_info_page_faults: { + description: "The total number of page faults." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file."
+ required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "extra_info_page_faults" + } + instance_local_time: { + description: "The ISODate representing the current time, according to the server, in UTC." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "instance_local_time" + } + instance_uptime_estimate_seconds_total: { + description: "The uptime in seconds as calculated from MongoDB’s internal coarse-grained time keeping system." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "instance_uptime_estimate_seconds_total" + } + instance_uptime_seconds_total: { + description: "The number of seconds that the current MongoDB process has been active." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "instance_uptime_seconds_total" + } + memory: { + description: "Current memory usage." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Memory type." + required: true + examples: ["resident", "virtual", "mapped", "mapped_with_journal"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "memory" + } + mongod_global_lock_active_clients: { + description: "Number of connected clients and the read and write operations performed by these clients." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Number type." + required: true + examples: ["total", "readers", "writers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_global_lock_active_clients" + } + mongod_global_lock_current_queue: { + description: "Number of operations queued because of a lock." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Number type." + required: true + examples: ["total", "readers", "writers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server."
+ required: true + examples: ["my-host.local"] + } + } + name: "mongod_global_lock_current_queue" + } + mongod_global_lock_total_time_seconds: { + description: "The time since the database last started and created the globalLock. This is roughly equivalent to total server uptime." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_global_lock_total_time_seconds" + } + mongod_locks_time_acquiring_global_seconds_total: { + description: "Amount of time that any database has spent waiting for the global lock." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Lock type." + required: true + examples: ["ParallelBatchWriterMode", "ReplicationStateTransition", "Global", "Database", "Collection", "Mutex", "Metadata", "oplog"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + mode: { + name: "mode" + description: "Lock mode." + required: true + examples: ["read", "write"] + } + } + name: "mongod_locks_time_acquiring_global_seconds_total" + } + mongod_metrics_cursor_open: { + description: "Number of cursors." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + state: { + name: "state" + description: "Cursor state." + required: true + examples: ["no_timeout", "pinned", "total"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_cursor_open" + } + mongod_metrics_cursor_timed_out_total: { + description: "The total number of cursors that have timed out since the server process started." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_cursor_timed_out_total" + } + mongod_metrics_document_total: { + description: "Document access and modification patterns." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + state: { + name: "state" + description: "Document state." + required: true + examples: ["deleted", "inserted", "returned", "updated"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_document_total" + } + mongod_metrics_get_last_error_wtime_num: { + description: "The total number of getLastError operations with a specified write concern." 
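+// Editor's sketch (hypothetical; not `cue eval` output): a concrete metric from the data checked against the #Metric shape sketched earlier, showing that the repeated structure really does unify.
+lockTimeExample: #Metric & {
+    name:              "mongod_global_lock_total_time_seconds"
+    description:       "The time since the database last started and created the globalLock. This is roughly equivalent to total server uptime."
+    type:              "counter"
+    default_namespace: "mongodb"
+    tags: endpoint: {
+        description: "The absolute path of the originating file."
+        required:    true
+        examples: ["mongodb://localhost:27017"]
+    }
+}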
+ type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_get_last_error_wtime_num" + } + mongod_metrics_get_last_error_wtime_seconds_total: { + description: "The total amount of time that the mongod has spent performing getLastError operations." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_get_last_error_wtime_seconds_total" + } + mongod_metrics_get_last_error_wtimeouts_total: { + description: "The number of times that write concern operations have timed out as a result of the wtimeout threshold to getLastError." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_get_last_error_wtimeouts_total" + } + mongod_metrics_operation_total: { + description: "Update and query operations that MongoDB handles using special operation types." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Operation type." + required: true + examples: ["scan_and_order", "write_conflicts"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_operation_total" + } + mongod_metrics_query_executor_total: { + description: "Data from query execution system." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + state: { + name: "state" + description: "Query state." + required: true + examples: ["scanned", "scanned_objects", "collection_scans"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_query_executor_total" + } + mongod_metrics_record_moves_total: { + description: "Moves reports the total number of times documents move within the on-disk representation of the MongoDB data set. Documents move as a result of operations that increase the size of the document beyond their allocated record size." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." 
+ required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_record_moves_total" + } + mongod_metrics_repl_apply_batches_num_total: { + description: "The total number of batches applied across all databases." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_apply_batches_num_total" + } + mongod_metrics_repl_apply_batches_seconds_total: { + description: "The total amount of time the mongod has spent applying operations from the oplog." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_apply_batches_seconds_total" + } + mongod_metrics_repl_apply_ops_total: { + description: "The total number of oplog operations applied." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_apply_ops_total" + } + mongod_metrics_repl_buffer_count: { + description: "The current number of operations in the oplog buffer." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_buffer_count" + } + mongod_metrics_repl_buffer_max_size_bytes_total: { + description: "The maximum size of the buffer." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_buffer_max_size_bytes_total" + } + mongod_metrics_repl_buffer_size_bytes: { + description: "The current size of the contents of the oplog buffer." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_buffer_size_bytes" + } + mongod_metrics_repl_executor_queue: { + description: "Number of queued operations in the replication executor." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." 
+ required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Queue type." + required: true + examples: ["network_in_progress", "sleepers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_executor_queue" + } + mongod_metrics_repl_executor_unsignaled_events: { + description: "Number of unsignaled events in the replication executor." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_executor_unsignaled_events" + } + mongod_metrics_repl_network_bytes_total: { + description: "The total amount of data read from the replication sync source." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_network_bytes_total" + } + mongod_metrics_repl_network_getmores_num_total: { + description: "The total number of getmore operations, which are operations that request an additional set of operations from the replication sync source." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_network_getmores_num_total" + } + mongod_metrics_repl_network_getmores_seconds_total: { + description: "The total amount of time required to collect data from getmore operations." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_network_getmores_seconds_total" + } + mongod_metrics_repl_network_ops_total: { + description: "The total number of operations read from the replication source." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_network_ops_total" + } + mongod_metrics_repl_network_readers_created_total: { + description: "The total number of oplog query processes created." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." 
+ required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_repl_network_readers_created_total" + } + mongod_metrics_ttl_deleted_documents_total: { + description: "The total number of documents deleted from collections with a ttl index." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_ttl_deleted_documents_total" + } + mongod_metrics_ttl_passes_total: { + description: "The number of times the background process removes documents from collections with a ttl index." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_metrics_ttl_passes_total" + } + mongod_op_latencies_histogram: { + description: "Latency statistics." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Latency type." + required: true + examples: ["reads", "writes", "commands"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + micros: { + name: "micros" + description: "Bucket." + required: true + examples: ["1", "2", "4096", "16384", "49152"] + } + } + name: "mongod_op_latencies_histogram" + } + mongod_op_latencies_latency: { + description: "A 64-bit integer giving the total combined latency in microseconds." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Latency type." + required: true + examples: ["network_in_progress", "sleepers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_op_latencies_latency" + } + mongod_op_latencies_ops_total: { + description: "A 64-bit integer giving the total number of operations performed on the collection since startup." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Latency type." + required: true + examples: ["network_in_progress", "sleepers"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_op_latencies_ops_total" + } + mongod_storage_engine: { + description: "The name of the current storage engine." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." 
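For a concrete sense of the shape this schema describes, here is one hypothetical scraped series for `mongod_op_latencies_histogram` with all four required tags populated (the values are illustrative only, not taken from these docs):

example_latency_series: {
	name: "mongod_op_latencies_histogram"
	tags: {
		endpoint: "mongodb://localhost:27017"
		host:     "my-host.local"
		type:     "reads"
		micros:   "4096" // histogram bucket, per the `micros` tag above
	}
	value: 42.0 // hypothetical sample
}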
+ required: true + examples: ["mongodb://localhost:27017"] + } + engine: { + name: "engine" + description: "Engine name." + required: true + examples: ["wiredTiger"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_storage_engine" + } + mongod_wiredtiger_blockmanager_blocks_total: { + description: "Statistics on the block manager operations." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Operation type." + required: true + examples: ["blocks_read", "blocks_read_mapped", "blocks_pre_loaded", "blocks_written"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_blockmanager_blocks_total" + } + mongod_wiredtiger_blockmanager_bytes_total: { + description: "Statistics on the block manager operations." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Operation type." + required: true + examples: ["bytes_read", "bytes_read_mapped", "bytes_written"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_blockmanager_bytes_total" + } + mongod_wiredtiger_cache_bytes: { + description: "Statistics on the cache and page evictions from the cache." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Statistics type." + required: true + examples: ["total", "dirty", "internal_pages", "leaf_pages"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_bytes" + } + mongod_wiredtiger_cache_bytes_total: { + description: "Statistics on the cache and page evictions from the cache." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Statistics type." + required: true + examples: ["read", "written"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_bytes_total" + } + mongod_wiredtiger_cache_evicted_total: { + description: "Statistics on the cache and page evictions from the cache." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." 
+ required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Statistics type." + required: true + examples: ["modified", "unmodified"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_evicted_total" + } + mongod_wiredtiger_cache_max_bytes: { + description: "Maximum cache size." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_max_bytes" + } + mongod_wiredtiger_cache_overhead_percent: { + description: "Percentage overhead." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_overhead_percent" + } + mongod_wiredtiger_cache_pages: { + description: "Pages in the cache." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Pages type." + required: true + examples: ["total", "dirty"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_pages" + } + mongod_wiredtiger_cache_pages_total: { + description: "Pages in the cache." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Pages type." + required: true + examples: ["read", "write"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_cache_pages_total" + } + mongod_wiredtiger_concurrent_transactions_available_tickets: { + description: "Information on the number of concurrent read and write transactions allowed into the WiredTiger storage engine." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Transactions type." + required: true + examples: ["read", "write"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server."
+ required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_concurrent_transactions_available_tickets" + } + mongod_wiredtiger_concurrent_transactions_out_tickets: { + description: "Information on the number of concurrent read and write transactions allowed into the WiredTiger storage engine." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Transactions type." + required: true + examples: ["read", "write"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_concurrent_transactions_out_tickets" + } + mongod_wiredtiger_concurrent_transactions_total_tickets: { + description: "Information on the number of concurrent read and write transactions allowed into the WiredTiger storage engine." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Transactions type." + required: true + examples: ["read", "write"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_concurrent_transactions_total_tickets" + } + mongod_wiredtiger_log_bytes_total: { + description: "Statistics on WiredTiger’s write ahead log (i.e. the journal)." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Bytes type." + required: true + examples: ["payload", "written"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_log_bytes_total" + } + mongod_wiredtiger_log_operations_total: { + description: "Statistics on WiredTiger’s write ahead log (i.e. the journal)." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Operations type." + required: true + examples: ["write", "scan", "scan_double", "sync", "sync_dir", "flush"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_log_operations_total" + } + mongod_wiredtiger_log_records_scanned_total: { + description: "Statistics on WiredTiger’s write ahead log (i.e. the journal)." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Scanned records type."
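The three ticket gauges above are usually read together. Under the common WiredTiger interpretation (an assumption here, not stated by these docs), tickets checked out plus tickets still available should equal the configured total. A tiny CUE sketch of that invariant, with hypothetical values:

tickets_example: {
	total_tickets:     128
	out_tickets:       5
	available_tickets: 123
	// Unification fails if the invariant does not hold:
	_check: total_tickets & (out_tickets + available_tickets)
}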
+ required: true + examples: ["compressed", "uncompressed"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_log_records_scanned_total" + } + mongod_wiredtiger_log_records_total: { + description: "Statistics on WiredTiger’s write ahead log (i.e. the journal)." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_log_records_total" + } + mongod_wiredtiger_session_open_sessions: { + description: "Open session count." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_session_open_sessions" + } + mongod_wiredtiger_transactions_checkpoint_seconds: { + description: "Statistics on transaction checkpoints and operations." + relevant_when: "Storage engine is `wiredTiger`." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Checkpoint type." + required: true + examples: ["min", "max"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_transactions_checkpoint_seconds" + } + mongod_wiredtiger_transactions_checkpoint_seconds_total: { + description: "Statistics on transaction checkpoints and operations." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_transactions_checkpoint_seconds_total" + } + mongod_wiredtiger_transactions_running_checkpoints: { + description: "Statistics on transaction checkpoints and operations." + relevant_when: "Storage engine is `wiredTiger`." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_transactions_running_checkpoints" + } + mongod_wiredtiger_transactions_total: { + description: "Statistics on transaction checkpoints and operations." + relevant_when: "Storage engine is `wiredTiger`." 
+ type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Transactions type." + required: true + examples: ["begins", "checkpoints", "committed", "rolledback"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongod_wiredtiger_transactions_total" + } + network_bytes_total: { + description: "The number of bytes that reflects the amount of network traffic." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + state: { + name: "state" + description: "Bytes state." + required: true + examples: ["bytes_in", "bytes_out"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "network_bytes_total" + } + network_metrics_num_requests_total: { + description: "The total number of distinct requests that the server has received." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "network_metrics_num_requests_total" + } + mongodb_op_counters_repl_total: { + description: "Database replication operations by type since the mongod instance last started." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Counter type." + required: true + examples: ["insert", "query", "update", "delete", "getmore", "command"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongodb_op_counters_repl_total" + } + mongodb_op_counters_total: { + description: "Database operations by type since the mongod instance last started." + type: "counter" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + type: { + name: "type" + description: "Counter type." + required: true + examples: ["insert", "query", "update", "delete", "getmore", "command"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." + required: true + examples: ["my-host.local"] + } + } + name: "mongodb_op_counters_total" + } + up: { + description: "If the MongoDB server is up or not." + type: "gauge" + default_namespace: "mongodb" + tags: { + endpoint: { + name: "endpoint" + description: "The absolute path of the originating file." + required: true + examples: ["mongodb://localhost:27017"] + } + host: { + name: "host" + description: "The hostname of the MongoDB server." 
+ required: true + examples: ["my-host.local"] + } + } + name: "up" + } + } + } + } + statsd: { + kind: "source" + title: "StatsD" + classes: { + commonly_used: false + delivery: "best_effort" + deployment_roles: ["aggregator"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "StatsD" + thing: "StatsD" + url: "https://github.com/statsd/statsd" + versions: null + } + interface: { + socket: { + api: { + title: "StatsD" + url: "https://github.com/b/statsd_spec" + } + direction: "incoming" + port: 8125 + protocols: ["udp"] + ssl: "optional" + } + } + } + receive_buffer_bytes: { + enabled: true + relevant_when: "mode = `tcp` or (mode = `udp` && os = `unix`)" + } + keepalive: { + enabled: true + } + tls: { + enabled: false + } + } + descriptions: { + receive_context: "Enriches data with useful StatsD context." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to listen for connections on, or `systemd#N` to use the Nth socket passed by systemd socket activation. If an address is used it _must_ include a port." + name: "address" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:8125", "systemd", "systemd#3"] + syntax: "literal" + } + } + } + mode: { + description: "The type of socket to use." + name: "mode" + required: true + warnings: [] + type: { + string: { + enum: { + tcp: "TCP Socket." + udp: "UDP Socket." + unix: "Unix Domain Socket." + } + examples: ["tcp", "udp", "unix"] + syntax: "literal" + } + } + } + path: { + description: "The unix socket path. *This should be an absolute path*." + name: "path" + relevant_when: "mode = `unix`" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/socket"] + syntax: "literal" + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the source." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + shutdown_timeout_secs: { + common: false + description: "The timeout before a connection is forcefully closed during shutdown." + name: "shutdown_timeout_secs" + relevant_when: "mode = `tcp`" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + statsd: "The type of this component."
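Wiring the `statsd` options documented above into a runnable source, sketched in CUE (the component IDs are hypothetical; `path` replaces `address` only when `mode` is `unix`):

sources: statsd_udp: {
	type:    "statsd"
	mode:    "udp"
	address: "0.0.0.0:8125"
}
sources: statsd_unix: {
	type: "statsd"
	mode: "unix"
	path: "/path/to/socket" // must be an absolute path
}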
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "statsd" + #ExampleConfig: { + title: string + configuration: { + address: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + counter: { + description: """ + A single value that can only be incremented + or reset to zero value, it cannot be + decremented. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "counter" + type: "counter" + default_namespace: "vector" + } + distribution: { + description: """ + A distribution represents a distribution of + sampled values. It is used with services + that support global histograms and summaries. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "distribution" + type: "distribution" + default_namespace: "vector" + } + gauge: { + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "gauge" + type: "gauge" + default_namespace: "vector" + } + set: { + description: "A set represents an array of unique values." + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "set" + type: "gauge" + default_namespace: "vector" + } + } + } + how_it_works: { + timestamps: { + #Subsection: { + title: string + body: string + } + name: "timestamps" + title: "Timestamps" + body: """ + StatsD protocol does not provide support for sending metric + timestamps. You'll notice that each parsed metric is assigned a + `null` timestamp, which is a special value which means "a real + time metric", i.e. not a historical one. Normally such `null` + timestamps will be substituted by current time by downstream + sinks or 3rd party services during sending/ingestion. See the + [metric][docs.data-model.metric] data model page for more info. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `statsd` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + connection_errors_total: { + description: "The total number of connection errors for this Vector instance." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_errors_total" + } + invalid_record_total: { + description: "The total number of invalid records that have been discarded." 
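To connect the output types above to the wire format, here are a few classic StatsD datagrams and the metric type each would plausibly surface as (a hedged mapping based on the StatsD spec linked above, not quoted from these docs). Note that every parsed event carries the `null` timestamp described in the timestamps section:

statsd_examples: [
	{input: "logins:1|c",       output_type: "counter"},      // monotonic increment
	{input: "fuel_level:0.5|g", output_type: "gauge"},        // point-in-time value
	{input: "req_ms:320|ms",    output_type: "distribution"}, // timing sample
	{input: "user_ids:42|s",    output_type: "set"},          // unique-value tracking
]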
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "invalid_record_total" + } + invalid_record_bytes_total: { + description: "The total number of bytes from invalid records that have been discarded." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "invalid_record_bytes_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the events." + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + } + } + } + vector: { + kind: "source" + title: "Vector" + description: "Receives data from another upstream Vector instance using the Vector sink." + classes: { + commonly_used: false + delivery: "best_effort" + deployment_roles: ["aggregator"] + development: "beta" + egress_method: "stream" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "Vector" + thing: "a Vector instance" + url: "https://vector.dev/docs/" + versions: ">= 0.11.0" + connect_to: { + splunk: { + logs: { + setup: [{ + title: "Create a Splunk HEC endpoint" + description: "Follow the Splunk HEC setup docs to create a Splunk HEC endpoint." + detour: { + url: "https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector" + } + }, { + title: "Configure Vector" + description: """ + Splunk will provide you with a host and token. Copy those + values to the `host` and `token` options. + """ + vector: { + configure: { + sinks: { + splunk_hec: { + type: "splunk_hec" + host: "" + token: "" + } + } + } + } + }] + } + } + } + } + interface: { + socket: { + direction: "incoming" + port: 9000 + protocols: ["tcp"] + ssl: "optional" + } + } + } + receive_buffer_bytes: { + enabled: true + } + keepalive: { + enabled: true + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful Vector context." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + tls_receive: "Securely receives data via Transport Layer Security (TLS)."
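A minimal sketch of the agent/aggregator pairing this source exists for: a downstream instance listens with the `vector` source while an upstream instance ships to it with the `vector` sink (the component IDs and aggregator hostname are hypothetical):

// Downstream (aggregator) instance:
sources: from_agents: {
	type:    "vector"
	address: "0.0.0.0:9000"
}
// Upstream (agent) instance:
sinks: to_aggregator: {
	type: "vector"
	inputs: ["app_logs"] // hypothetical upstream component ID
	address: "aggregator.internal:9000"
}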
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The TCP address to listen for connections on, or `systemd#N` to use the Nth socket passed by systemd socket activation. If an address is used it _must_ include a port." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:9000", "systemd", "systemd#1"] + syntax: "literal" + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the source." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set."
+ name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + shutdown_timeout_secs: { + common: false + description: "The timeout before a connection is forcefully closed during shutdown." + name: "shutdown_timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + vector: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "vector" + #ExampleConfig: { + title: string + configuration: { + address: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + event: { + description: "A Vector event" + name: "event" + fields: { + "*": { + description: "Vector transparently forwards data from another upstream Vector instance. The `vector` source will not modify or add fields." + name: "*" + required: true + warnings: [] + type: { + "*": {} + } + } + } + } + } + metrics: { + counter: { + description: """ + A single value that can only be incremented + or reset to zero value, it cannot be + decremented. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "counter" + type: "counter" + default_namespace: "vector" + } + distribution: { + description: """ + A distribution represents a distribution of + sampled values. It is used with services + that support global histograms and summaries. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "distribution" + type: "distribution" + default_namespace: "vector" + } + gauge: { + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "gauge" + type: "gauge" + default_namespace: "vector" + } + histogram: { + description: """ + Also called a "timer". A histogram samples + observations (usually things like request + durations or response sizes) and counts them + in configurable buckets. It also provides a + sum of all observed values. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "histogram" + type: "gauge" + default_namespace: "vector" + } + set: { + description: "A set represents an array of unique values." + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." 
+ examples: ["my-host.local"] + required: false + } + } + name: "set" + type: "gauge" + default_namespace: "vector" + } + } + } + how_it_works: { + encoding: { + #Subsection: { + title: string + body: string + } + name: "encoding" + title: "Encoding" + body: """ + Data is encoded via Vector's [event protobuf](https://github.com/timberio/vector/blob/master/proto/event.proto) + before it is sent over the wire. + """ + } + communication_protocol: { + #Subsection: { + title: string + body: string + } + name: "communication_protocol" + title: "Communication Protocol" + body: """ + Upstream Vector instances forward data to downstream Vector + instances via the TCP protocol. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + message_acknowledgement: { + #Subsection: { + title: string + body: string + } + name: "message_acknowledgement" + title: "Message Acknowledgement" + body: """ + Currently, Vector does not perform any application level message + acknowledgement. While rare, this means the individual message + could be lost. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `vector` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + protobuf_decode_errors_total: { + description: "The total number of [Protocol Buffers](https://developers.google.com/protocol-buffers) errors thrown during communication between Vector instances." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "protobuf_decode_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + postgresql_metrics: { + kind: "source" + title: "PostgreSQL Metrics" + description: "[PostgreSQL][urls.postgresql] is a powerful, open source object-relational database system with over 30 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance." + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "PostgreSQL Server" + thing: "a PostgreSQL Server" + url: "https://www.postgresql.org/" + versions: "9.6-13" + } + interface: { + socket: { + direction: "outgoing" + protocols: ["tcp", "unix"] + ssl: "optional" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful PostgreSQL Server context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + endpoints: { + description: "PostgreSQL server endpoints, specified as libpq-style connection strings." + name: "endpoints" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["postgresql://postgres:vector@localhost:5432/postgres"] + syntax: "literal" + } + } + } + } + } + } + scrape_interval_secs: { + description: "The interval between scrapes." + common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + namespace: { + description: "The namespace of metrics. Disabled if empty." + common: false + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "postgresql" + syntax: "literal" + } + } + } + include_databases: { + description: """ + A list of databases to match (using [POSIX Regular Expressions][urls.postgresql_matching]) against + the `datname` column, selecting the databases to collect metrics from. + If not set, metrics will be collected from all databases. + Specifying `""` will include metrics where `datname` is `NULL`. + This can be used in conjunction with [`exclude_databases`](#exclude_databases). + """ + common: false + name: "include_databases" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["^postgres$", "^vector$", "^foo"] + syntax: "literal" + } + } + } + } + } + } + exclude_databases: { + description: """ + A list of databases to match (using [POSIX Regular Expressions][urls.postgresql_matching]) against + the `datname` column, selecting the databases to skip when collecting metrics.
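A minimal `postgresql_metrics` configuration using the options documented above (the component ID and credentials are illustrative):

sources: pg_metrics: {
	type: "postgresql_metrics"
	endpoints: ["postgresql://postgres:vector@localhost:5432/postgres"]
	scrape_interval_secs: 15           // the documented default
	namespace:            "postgresql" // metric name prefix; "" disables it
}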
+ Specifying `""` will exclude metrics where `datname` is `NULL`. + This can be used in conjunction with [`include_databases`](#include_databases). + """ + common: false + name: "exclude_databases" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["^postgres$", "^template.*", ""] + syntax: "literal" + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "TLS options to connect to the PostgreSQL Server." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + description: "Path to CA certificate file." + name: "ca_file" + required: true + warnings: [] + type: { + string: { + examples: ["certs/ca.pem"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + postgresql_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "postgresql_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoints: null + scrape_interval_secs: null + namespace: null + include_databases: null + exclude_databases: null + tls: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + how_it_works: { + privileges: { + #Subsection: { + title: string + body: string + } + name: "privileges" + title: "Required Privileges" + body: """ + The PostgreSQL Metrics component collects metrics by making queries to the configured PostgreSQL server. + Ensure the configured user is allowed to run `SELECT` queries against the following views: + + - `pg_stat_database` + - `pg_stat_database_conflicts` + - `pg_stat_bgwriter` + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `postgresql_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + collect_completed_total: { + description: "The total number of metrics collections completed for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_completed_total" + } + collect_duration_nanoseconds: { + description: "The duration spent collecting metrics for this component." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "collect_duration_nanoseconds" + } + events_out_total: { + description: "The total number of events emitted by this component."
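And a sketch of the database-filtering options, combining both lists (the `^app_` pattern is hypothetical):

sources: pg_filtered: {
	type: "postgresql_metrics"
	endpoints: ["postgresql://postgres:vector@localhost:5432/postgres"]
	include_databases: ["^postgres$", "^app_"] // regexes matched against datname
	exclude_databases: ["^template.*", ""]     // "" also matches a NULL datname
	tls: ca_file: "certs/ca.pem"
}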
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + request_errors_total: { + description: "The total number of requests errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_errors_total" + } + } + } + output: { + metrics: { + up: { + description: "Whether the PostgreSQL server is up or not." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "up" + } + pg_stat_database_datid: { + description: "OID of this database, or 0 for objects belonging to a shared relation." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_datid" + } + pg_stat_database_numbackends: { + description: "Number of backends currently connected to this database, or 0 for shared objects. This is the only column in this view that returns a value reflecting current state; all other columns return the accumulated values since the last reset." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_numbackends" + } + pg_stat_database_xact_commit_total: { + description: "Number of transactions in this database that have been committed." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." 
+ required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_xact_commit_total" + } + pg_stat_database_xact_rollback_total: { + description: "Number of transactions in this database that have been rolled back." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_xact_rollback_total" + } + pg_stat_database_blks_read_total: { + description: "Number of disk blocks read in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_blks_read_total" + } + pg_stat_database_blks_hit_total: { + description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_blks_hit_total" + } + pg_stat_database_tup_returned_total: { + description: "Number of rows returned by queries in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_tup_returned_total" + } + pg_stat_database_tup_fetched_total: { + description: "Number of rows fetched by queries in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." 
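The two block counters above are the inputs to the usual buffer-cache hit-ratio calculation; a worked example with hypothetical readings:

cache_hit_ratio: {
	blks_hit:  9900.0 // pg_stat_database_blks_hit_total
	blks_read: 100.0  // pg_stat_database_blks_read_total
	ratio: blks_hit / (blks_hit + blks_read) // = 0.99
}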
+ required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_tup_fetched_total" + } + pg_stat_database_tup_inserted_total: { + description: "Number of rows inserted by queries in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_tup_inserted_total" + } + pg_stat_database_tup_updated_total: { + description: "Number of rows updated by queries in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_tup_updated_total" + } + pg_stat_database_tup_deleted_total: { + description: "Number of rows deleted by queries in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_tup_deleted_total" + } + pg_stat_database_conflicts_total: { + description: "Number of queries canceled due to conflicts with recovery in this database. (Conflicts occur only on standby servers; see `pg_stat_database_conflicts` for details.)" + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_total" + } + pg_stat_database_temp_files_total: { + description: "Number of temporary files created by queries in this database. All temporary files are counted, regardless of why the temporary file was created (e.g., sorting or hashing), and regardless of the `log_temp_files` setting." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." 
+ required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_temp_files_total" + } + pg_stat_database_temp_bytes_total: { + description: "Total amount of data written to temporary files by queries in this database. All temporary files are counted, regardless of why the temporary file was created, and regardless of the `log_temp_files` setting." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_temp_bytes_total" + } + pg_stat_database_deadlocks_total: { + description: "Number of deadlocks detected in this database." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_deadlocks_total" + } + pg_stat_database_checksum_failures_total: { + description: "Number of data page checksum failures detected in this database (or on a shared object), or 0 if data checksums are not enabled." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_checksum_failures_total" + } + pg_stat_database_checksum_last_failure: { + description: "Time at which the last data page checksum failure was detected in this database (or on a shared object), or 0 if data checksums are not enabled." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_checksum_last_failure" + } + pg_stat_database_blk_read_time_seconds_total: { + description: "Time spent reading data file blocks by backends in this database, in seconds (if `track_io_timing` is enabled, otherwise zero)." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server."
+ required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_blk_read_time_seconds_total" + } + pg_stat_database_blk_write_time_seconds_total: { + description: "Time spent writing data file blocks by backends in this database, in seconds (if `track_io_timing` is enabled, otherwise zero)." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_blk_write_time_seconds_total" + } + pg_stat_database_stats_reset: { + description: "Time at which these statistics were last reset." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_stats_reset" + } + pg_stat_database_conflicts_confl_tablespace_total: { + description: "Number of queries in this database that have been canceled due to dropped tablespaces." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_confl_tablespace_total" + } + pg_stat_database_conflicts_confl_lock_total: { + description: "Number of queries in this database that have been canceled due to lock timeouts." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_confl_lock_total" + } + pg_stat_database_conflicts_confl_snapshot_total: { + description: "Number of queries in this database that have been canceled due to old snapshots." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server."
+ required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_confl_snapshot_total" + } + pg_stat_database_conflicts_confl_bufferpin_total: { + description: "Number of queries in this database that have been canceled due to pinned buffers." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_confl_bufferpin_total" + } + pg_stat_database_conflicts_confl_deadlock_total: { + description: "Number of queries in this database that have been canceled due to deadlocks." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + type: { + name: "type" + description: "Database name." + required: true + examples: ["postgres"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_database_conflicts_confl_deadlock_total" + } + pg_stat_bgwriter_checkpoints_timed_total: { + description: "Number of scheduled checkpoints that have been performed." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_checkpoints_timed_total" + } + pg_stat_bgwriter_checkpoints_req_total: { + description: "Number of requested checkpoints that have been performed." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_checkpoints_req_total" + } + pg_stat_bgwriter_checkpoint_write_time_seconds_total: { + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_checkpoint_write_time_seconds_total" + } + pg_stat_bgwriter_checkpoint_sync_time_seconds_total: { + description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." 
+ required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_checkpoint_sync_time_seconds_total" + } + pg_stat_bgwriter_buffers_checkpoint_total: { + description: "Number of buffers written during checkpoints." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_buffers_checkpoint_total" + } + pg_stat_bgwriter_buffers_clean_total: { + description: "Number of buffers written by the background writer." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_buffers_clean_total" + } + pg_stat_bgwriter_maxwritten_clean_total: { + description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_maxwritten_clean_total" + } + pg_stat_bgwriter_buffers_backend_total: { + description: "Number of buffers written directly by a backend." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_buffers_backend_total" + } + pg_stat_bgwriter_buffers_backend_fsync_total: { + description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_buffers_backend_fsync_total" + } + pg_stat_bgwriter_buffers_alloc_total: { + description: "Number of buffers allocated." + type: "counter" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." 
+ required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_buffers_alloc_total" + } + pg_stat_bgwriter_stats_reset: { + description: "Time at which these statistics were last reset." + type: "gauge" + default_namespace: "postgresql" + tags: { + endpoint: { + name: "endpoint" + description: "PostgreSQL endpoint." + required: true + examples: ["postgresql:///postgres?host=localhost&port=5432"] + } + host: { + name: "host" + description: "The hostname of the PostgreSQL server." + required: true + examples: ["my-host.local"] + } + } + name: "pg_stat_bgwriter_stats_reset" + } + } + } + } + kubernetes_logs: { + kind: "source" + title: "Kubernetes Logs" + description: """ + Collects all log data for Kubernetes Nodes, automatically enriching data + with Kubernetes metadata via the Kubernetes API. + """ + classes: { + commonly_used: true + delivery: "best_effort" + deployment_roles: ["daemon"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: true + } + from: { + service: { + name: "Kubernetes" + thing: "a Kubernetes cluster" + url: "https://kubernetes.io" + versions: ">= 1.14" + } + interface: { + file_system: { + directory: "/var/log" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Kubernetes context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: "kubernetes" + } + configuration: { + annotation_fields: { + common: false + category: "Annotation_fields" + description: "Configuration for how the events are annotated with Pod metadata." + name: "annotation_fields" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + container_image: { + common: false + description: "Event field for Container image." + name: "container_image" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.container_image" + syntax: "literal" + } + } + } + container_name: { + common: false + description: "Event field for Container name." + name: "container_name" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.container_name" + syntax: "literal" + } + } + } + pod_ip: { + common: false + description: "Event field for Pod IPv4 Address." + name: "pod_ip" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_ip" + syntax: "literal" + } + } + } + pod_ips: { + common: false + description: "Event field for Pod IPv4 and IPv6 Addresses." + name: "pod_ips" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_ips" + syntax: "literal" + } + } + } + pod_labels: { + common: false + description: "Event field for Pod labels." + name: "pod_labels" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_labels" + syntax: "literal" + } + } + } + pod_name: { + common: false + description: "Event field for Pod name." 
+ name: "pod_name" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_name" + syntax: "literal" + } + } + } + pod_namespace: { + common: false + description: "Event field for Pod namespace." + name: "pod_namespace" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_namespace" + syntax: "literal" + } + } + } + pod_node_name: { + common: false + description: "Event field for Pod node_name." + name: "pod_node_name" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_node_name" + syntax: "literal" + } + } + } + pod_uid: { + common: false + description: "Event field for Pod uid." + name: "pod_uid" + required: false + warnings: [] + type: { + string: { + default: "kubernetes.pod_uid" + syntax: "literal" + } + } + } + } + } + } + } + auto_partial_merge: { + common: false + description: "Automatically merge partial messages into a single event. Partial here is in respect to messages that were split by the Kubernetes Container Runtime log driver." + name: "auto_partial_merge" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + self_node_name: { + common: false + description: "The name of the Kubernetes `Node` this Vector instance runs at. Configured to use an env var by default, to be evaluated to a value provided by Kubernetes at Pod deploy time." + name: "self_node_name" + required: false + warnings: [] + type: { + string: { + default: "${VECTOR_SELF_NODE_NAME}" + syntax: "literal" + } + } + } + exclude_paths_glob_patterns: { + common: false + description: "A list of glob patterns to exclude from reading the files." + name: "exclude_paths_glob_patterns" + required: false + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["**/exclude/**"] + syntax: "literal" + } + } + } + } + } + } + extra_field_selector: { + common: false + description: """ + Specifies the field selector to filter `Pod`s with, to be used in addition to the built-in `Node` filter. + The name of the Kubernetes `Node` this Vector instance runs at. Configured to use an env var by default, to be evaluated to a value provided by Kubernetes at Pod deploy time. + """ + name: "extra_field_selector" + required: false + warnings: [] + type: { + string: { + default: "" + examples: ["metadata.name!=pod-name-to-exclude", "metadata.name!=pod-name-to-exclude,metadata.name=mypod"] + syntax: "literal" + } + } + } + extra_label_selector: { + common: false + description: """ + Specifies the label selector to filter `Pod`s with, to be used in + addition to the built-in `vector.dev/exclude` filter. + """ + name: "extra_label_selector" + required: false + warnings: [] + type: { + string: { + default: "" + examples: ["my_custom_label!=my_value", "my_custom_label!=my_value,my_other_custom_label=my_value"] + syntax: "literal" + } + } + } + data_dir: { + common: false + description: "The directory used to persist file checkpoint positions. By default, the global `data_dir` option is used. Please make sure the Vector project has write permissions to this dir." + name: "data_dir" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/var/lib/vector"] + syntax: "file_system_path" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. 
The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/New_York", "EST5EDT"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + kubernetes_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "kubernetes_logs" + #ExampleConfig: { + title: string + configuration: { + annotation_fields: null + auto_partial_merge: null + self_node_name: null + exclude_paths_glob_patterns: null + extra_field_selector: null + extra_label_selector: null + timezone: null + type: null + data_dir: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "An individual line from a `Pod` log file." + name: "line" + fields: { + file: { + description: "The absolute path of the originating file." + name: "file" + required: true + warnings: [] + type: { + string: { + examples: ["/var/log/pods/pod-namespace_pod-name_pod-uid/container/1.log"] + syntax: "literal" + } + } + } + "kubernetes.container_image": { + description: "Container image." + name: "kubernetes.container_image" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["busybox:1.30"] + default: null + syntax: "literal" + } + } + } + "kubernetes.container_name": { + description: "Container name." + name: "kubernetes.container_name" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["coredns"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_ip": { + description: "Pod IPv4 address." + name: "kubernetes.pod_ip" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["192.168.1.1"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_ips": { + description: "Pod IPv4 and IPv6 addresses." + name: "kubernetes.pod_ips" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["192.168.1.1", "::1"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_labels": { + category: "Kubernetes.pod_labels" + description: "Pod labels." + name: "kubernetes.pod_labels" + required: false + common: true + warnings: [] + type: { + object: { + examples: [{ + mylabel: "myvalue" + }] + options: {} + } + } + } + "kubernetes.pod_name": { + description: "Pod name." + name: "kubernetes.pod_name" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["coredns-qwertyuiop-qwert"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_namespace": { + description: "Pod namespace." + name: "kubernetes.pod_namespace" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["kube-system"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_node_name": { + description: "Pod node name." + name: "kubernetes.pod_node_name" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["minikube"] + default: null + syntax: "literal" + } + } + } + "kubernetes.pod_uid": { + description: "Pod uid." 
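+ // Tying the configuration options above together, a minimal agent
+ // configuration for this source might look like the following sketch
+ // (CUE form; field names and example values as documented above):
+ //
+ //	sources: k8s: {
+ //		type:                 "kubernetes_logs"
+ //		auto_partial_merge:   true
+ //		self_node_name:       "${VECTOR_SELF_NODE_NAME}"
+ //		extra_label_selector: "my_custom_label!=my_value"
+ //	}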
+ name: "kubernetes.pod_uid" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["ba46d8c9-9541-4f6b-bbf9-d23b36f2f136"] + default: null + syntax: "literal" + } + } + } + message: { + description: "The raw line from the Pod log file." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308"] + syntax: "literal" + } + } + } + source_type: { + description: "The name of the source type." + name: "source_type" + required: true + warnings: [] + type: { + string: { + examples: ["kubernetes_logs"] + syntax: "literal" + } + } + } + stream: { + description: "The name of the stream the log line was sumbitted to." + name: "stream" + required: true + warnings: [] + type: { + string: { + examples: ["stdout", "stderr"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + examples: [{ + title: "Sample Output" + configuration: { + annotation_fields: null + auto_partial_merge: null + self_node_name: null + exclude_paths_glob_patterns: null + extra_field_selector: null + extra_label_selector: null + timezone: null + type: null + data_dir: null + } + input: """ + ```text + F1015 11:01:46.499073 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: connect: network is unreachable + ``` + """ + output: { + log: { + file: "/var/log/pods/kube-system_storage-provisioner_93bde4d0-9731-4785-a80e-cd27ba8ad7c2/storage-provisioner/1.log" + "kubernetes.container_image": "gcr.io/k8s-minikube/storage-provisioner:v3" + "kubernetes.container_name": "storage-provisioner" + "kubernetes.pod_ip": "192.168.1.1" + "kubernetes.pod_ips": ["192.168.1.1", "::1"] + "kubernetes.pod_labels": { + "addonmanager.kubernetes.io/mode": "Reconcile" + "gcp-auth-skip-secret": "true" + "integration-test": "storage-provisioner" + } + "kubernetes.pod_name": "storage-provisioner" + "kubernetes.pod_namespace": "kube-system" + "kubernetes.pod_node_name": "minikube" + "kubernetes.pod_uid": "93bde4d0-9731-4785-a80e-cd27ba8ad7c2" + message: "F1015 11:01:46.499073 1 main.go:39] error getting server version: Get \"https://10.96.0.1:443/version?timeout=32s\": dial tcp 10.96.0.1:443: connect: network is unreachable" + source_type: "kubernetes_logs" + stream: "stderr" + timestamp: "2020-10-15T11:01:46.499555308Z" + } + } + }] + how_it_works: { + enrichment: { + #Subsection: { + title: string + body: string + } + name: "enrichment" + title: "Enrichment" + body: """ + Vector will enrich data with Kubernetes context. A comprehensive + list of fields can be found in the + [`kubernetes_logs` source output docs](https://vector.dev/docs/reference/sources/kubernetes_logs/#output). + """ + } + filtering: { + #Subsection: { + title: string + body: string + } + name: "filtering" + title: "Filtering" + body: """ + Vector provides rich filtering options for Kubernetes log collection: + + * Built-in [`Pod`](#pod-exclusion) and [`container`](#container-exclusion) + exclusion rules. + * The `exclude_paths_glob_patterns` option allows you to exclude + Kuberenetes log files by the file name and path. + * The `extra_field_selector` option specifies the field selector to + filter Pods with, to be used in addition to the built-in `Node` filter. 
+ * The `extra_label_selector` option specifies the label selector to + filter `Pod`s with, to be used in addition to the [built-in + `vector.dev/exclude` filter](#pod-exclusion). + """ + } + pod_exclusion: { + #Subsection: { + title: string + body: string + } + name: "pod_exclusion" + title: "Pod exclusion" + body: """ + By default, the [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + will skip logs from the `Pod`s that have a `vector.dev/exclude: "true"` *label*. + You can configure additional exclusion rules via label or field selectors, + see [the available options](https://vector.dev/docs/reference/sources/kubernetes_logs/#configuration). + """ + } + container_exclusion: { + #Subsection: { + title: string + body: string + } + name: "container_exclusion" + title: "Container exclusion" + body: """ + The [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + can skip the logs from the individual `container`s of a particular + `Pod`. Add an *annotation* `vector.dev/exclude-containers` to the + `Pod`, and enumerate the `name`s of all the `container`s to exclude in + the value of the annotation like so: + + ``` + vector.dev/exclude-containers: "container1,container2" + ``` + + This annotation will make Vector skip logs originating from the + `container1` and `container2` of the `Pod` marked with the annotation, + while logs from other `container`s in the `Pod` will still be + collected. + """ + } + kubernetes_api_communication: { + #Subsection: { + title: string + body: string + } + name: "kubernetes_api_communication" + title: "Kubernetes API communication" + body: """ + Vector communicates with the Kubernetes API to enrich the data it collects with + Kubernetes context. Therefore, Vector must have access to communicate with the + [Kubernetes API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). If Vector is running in + a Kubernetes cluster then Vector will connect to that cluster using the + [Kubernetes provided access information](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod). + + In addition to access, Vector implements proper desync handling to ensure + communication is safe and reliable. This ensures that Vector will not overwhelm + the Kubernetes API or compromise its stability. + """ + } + partial_message_merging: { + #Subsection: { + title: string + body: string + } + name: "partial_message_merging" + title: "Partial message merging" + body: """ + Vector, by default, will merge partial messages that are + split due to the Docker size limit. For everything else, it + is recommended to use the [`reduce` + transform](https://vector.dev/docs/reference/transforms/reduce/) which offers + the ability to handle custom merging of things like + stacktraces. + """ + } + pod_removal: { + #Subsection: { + title: string + body: string + } + name: "pod_removal" + title: "Pod removal" + body: """ + To ensure all data is collected, Vector will continue to collect logs from the + `Pod` for some time after its removal. This ensures that Vector obtains some of + the most important data, such as crash details. + """ + } + resource_limits: { + #Subsection: { + title: string + body: string + } + name: "resource_limits" + title: "Resource limits" + body: "Vector recommends the following resource limits." 
+ sub_sections: [{ + title: "Agent resource limits" + body: """ + If you deploy Vector as an agent (collecting data for each of your + Nodes), then we recommend the following limits: + + ```yaml + resources: + requests: + memory: "64Mi" + cpu: "500m" + limits: + memory: "1024Mi" + cpu: "6000m" + ``` + + **As with all Kubernetes resource limit recommendations, use these + as a reference point and adjust as necessary. If your configured + Vector pipeline is complex, you may need more resources. If you + have a simple pipeline, you may need less.** + """ + }] + } + state_management: { + #Subsection: { + title: string + body: string + } + name: "state_management" + title: "State management" + body: null + sub_sections: [{ + title: "Agent state management" + body: """ + For the agent role, Vector stores its state in the host-mapped directory with a static + path, so if it's redeployed it'll continue from where it was interrupted. + """ + }] + } + testing_and_reliability: { + #Subsection: { + title: string + body: string + } + name: "testing_and_reliability" + title: "Testing & reliability" + body: """ + Vector is tested extensively against Kubernetes. In addition to Kubernetes + being Vector's most popular installation method, Vector implements a + comprehensive end-to-end test suite for all minor Kubernetes versions starting + with `1.14`. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + checkpointing: { + #Subsection: { + title: string + body: string + } + name: "checkpointing" + title: "Checkpointing" + body: """ + Vector checkpoints the current read position after each + successful read. This ensures that Vector resumes where it left + off if restarted, preventing data from being read twice. The + checkpoint positions are stored in the data directory which is + specified via the global `data_dir` option, but can be overridden + via the `data_dir` option in the file source directly. + """ + } + kubernetes_api_access_control: { + #Subsection: { + title: string + body: string + } + name: "kubernetes_api_access_control" + title: "Kubernetes API access control" + body: """ + Vector requires access to the Kubernetes API. + Specifically, the [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + uses the `/api/v1/pods` endpoint to "watch" the pods from + all namespaces. + + Modern Kubernetes clusters run with the RBAC (role-based access control) + scheme. RBAC-enabled clusters require some configuration to grant Vector + the authorization to access the Kubernetes API endpoints. As RBAC is + currently the standard way of controlling access to the Kubernetes API, + we ship the necessary configuration out of the box: see `ClusterRole`, + `ClusterRoleBinding` and a `ServiceAccount` in our `kubectl` YAML + config, and the `rbac` configuration in the Helm chart. + + If your cluster doesn't use any access control scheme and doesn't + restrict access to the Kubernetes API, you don't need to do any extra + configuration - Vector will just work. + + Clusters using the legacy ABAC scheme are not officially supported + (although Vector might work if you configure access properly) - + we encourage switching to RBAC. If you use a custom access control + scheme, make sure the Vector `Pod`/`ServiceAccount` is granted access to + the `/api/v1/pods` resource. 
+ """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `kubernetes_logs` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + k8s_format_picker_edge_cases_total: { + description: "The total number of edge cases encountered while picking format of the Kubernetes log message." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_format_picker_edge_cases_total" + } + k8s_docker_format_parse_failures_total: { + description: "The total number of failures to parse a message as a JSON object." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_docker_format_parse_failures_total" + } + k8s_event_annotation_failures_total: { + description: "The total number of failures to annotate Vector events with Kubernetes Pod metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "k8s_event_annotation_failures_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
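+ // The instance, component_kind, component_name, component_type, and job
+ // tags above repeat for every internal telemetry metric; a sketch of how
+ // they could be factored into one reusable definition (the #ComponentTags
+ // name is hypothetical, not part of the generated output):
+ //
+ //	#ComponentTags: tags: {
+ //		instance:       {name: "instance", description: string, required: true}
+ //		component_kind: {name: "component_kind", description: string, required: true}
+ //		component_name: {name: "component_name", description: string, required: true}
+ //		component_type: {name: "component_type", description: string, required: true}
+ //		job:            {name: "job", description: string, required: true, default: "vector"}
+ //	}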
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + } + } + } + host_metrics: { + kind: "source" + title: "Host Metrics" + description: """ + Examines system data sources on the local system and generates metrics + describing utilization of various system resources, such as CPU, memory, + disk, and network. + """ + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "a Host (Node)" + thing: "a Host (Node)" + url: "https://en.wikipedia.org/wiki/Host_(network)" + versions: null + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Host (Node) context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + notices: [] + requirements: [] + warnings: [] + } + installation: { + platform_name: null + } + env_vars: { + PROCFS_ROOT: { + description: "Sets an arbitrary path to the system's Procfs root. Can be used to expose host metrics from within a container. Unset by default, in which case the system `/proc` is used." + name: "PROCFS_ROOT" + common: true + type: { + string: { + default: null + examples: ["/mnt/host/proc"] + syntax: "literal" + } + } + required: false + warnings: [] + } + SYSFS_ROOT: { + description: "Sets an arbitrary path to the system's Sysfs root. Can be used to expose host metrics from within a container. Unset by default, in which case the system `/sys` is used." + name: "SYSFS_ROOT" + common: true + type: { + string: { + default: null + examples: ["/mnt/host/sys"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + configuration: { + collectors: { + description: "The list of host metric collector services to use. Defaults to all collectors." + common: true + name: "collectors" + required: false + warnings: [] + type: { + array: { + default: ["cpu", "disk", "filesystem", "load", "host", "memory", "network"] + items: { + type: { + string: { + enum: { + cpu: "Metrics related to CPU utilization." + disk: "Metrics related to disk I/O utilization." + filesystem: "Metrics related to filesystem space utilization." + load: "Load average metrics (UNIX only)." + host: "Metrics related to the host." + memory: "Metrics related to memory utilization." + network: "Metrics related to network utilization." + } + examples: ["cpu", "disk", "filesystem", "load", "host", "memory", "network"] + syntax: "literal" + } + } + } + } + } + } + namespace: { + description: "The namespace of metrics. Disabled if empty." + common: false + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + scrape_interval_secs: { + description: "The interval between metric gathering, in seconds." + common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + disk: { + common: false + category: "Disk" + description: "Options for the \"disk\" metrics collector." 
+ name: "disk" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + devices: { + common: false + category: "Devices" + required: false + description: "Lists of device name patterns to include or exclude." + name: "devices" + warnings: [] + type: { + object: { + examples: [] + options: { + includes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather I/O utilization metrics. + Defaults to including all devices. + The patterns are matched using [globbing](#globbing). + """ + name: "includes" + warnings: [] + type: { + array: { + default: ["*"] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + excludes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather I/O utilization metrics. + Defaults to excluding no devices. + The patterns are matched using [globbing](#globbing). + """ + name: "excludes" + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + filesystem: { + common: false + category: "Filesystem" + description: "Options for the \"filesystem\" metrics collector." + name: "filesystem" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + devices: { + common: false + category: "Devices" + required: false + description: "Lists of device name patterns to include or exclude." + name: "devices" + warnings: [] + type: { + object: { + examples: [] + options: { + includes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather usage metrics. + Defaults to including all devices. + The patterns are matched using [globbing](#globbing). + """ + name: "includes" + warnings: [] + type: { + array: { + default: ["*"] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + excludes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather usage metrics. + Defaults to excluding no devices. + The patterns are matched using [globbing](#globbing). + """ + name: "excludes" + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + filesystems: { + common: false + category: "Filesystems" + required: false + description: "Lists of filesystem name patterns to include or exclude." + name: "filesystems" + warnings: [] + type: { + object: { + examples: [] + options: { + includes: { + required: false + common: false + description: """ + The list of filesystem name patterns for which to gather usage metrics. + Defaults to including all filesystems. + The patterns are matched using [globbing](#globbing). + """ + name: "includes" + warnings: [] + type: { + array: { + default: ["*"] + items: { + type: { + string: { + examples: ["ntfs", "ext*"] + syntax: "literal" + } + } + } + } + } + } + excludes: { + required: false + common: false + description: """ + The list of filesystem name patterns for which to gather usage metrics. + Defaults to excluding no filesystems. + The patterns are matched using [globbing](#globbing). 
+ """ + name: "excludes" + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["ntfs", "ext*"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + mountpoints: { + common: false + category: "Mountpoints" + required: false + description: "Lists of mount point path patterns to include or exclude." + name: "mountpoints" + warnings: [] + type: { + object: { + examples: [] + options: { + includes: { + required: false + common: false + description: """ + The list of mount point path patterns for which to gather usage metrics. + Defaults to including all mount points. + The patterns are matched using [globbing](#globbing). + """ + name: "includes" + warnings: [] + type: { + array: { + default: ["*"] + items: { + type: { + string: { + examples: ["/home", "/raid*"] + syntax: "literal" + } + } + } + } + } + } + excludes: { + required: false + common: false + description: """ + The list of mount point path patterns for which to gather usage metrics. + Defaults to excluding no mount points. + The patterns are matched using [globbing](#globbing). + """ + name: "excludes" + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["/home", "/raid*"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + network: { + common: false + category: "Network" + description: "Options for the \"network\" metrics collector." + name: "network" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + devices: { + common: false + category: "Devices" + required: false + description: "Lists of device name patterns to include or exclude." + name: "devices" + warnings: [] + type: { + object: { + examples: [] + options: { + includes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather network utilization metrics. + Defaults to including all devices. + The patterns are matched using [globbing](#globbing). + """ + name: "includes" + warnings: [] + type: { + array: { + default: ["*"] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + excludes: { + required: false + common: false + description: """ + The list of device name patterns for which to gather network utilization metrics. + Defaults to excluding no devices. + The patterns are matched using [globbing](#globbing). + """ + name: "excludes" + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["sda", "dm-*"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + host_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + type: "host_metrics" + #ExampleConfig: { + title: string + configuration: { + collectors: null + namespace: null + scrape_interval_secs: null + disk: null + filesystem: null + network: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + host_cpu_seconds_total: { + description: "The number of CPU seconds accumulated in different operating modes." + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." 
+ examples: ["cpu"] + required: true + } + cpu: { + name: "cpu" + description: "The index of the CPU core or socket." + required: true + examples: ["1"] + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + mode: { + name: "mode" + description: "Which mode the CPU was running in during the given time." + required: true + examples: ["idle", "system", "user", "nice"] + } + } + name: "host_cpu_seconds_total" + } + disk_read_bytes_total: { + description: "The accumulated number of bytes read in." + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["disk"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + } + name: "disk_read_bytes_total" + } + disk_reads_completed_total: { + description: "The accumulated number of read operations completed." + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["disk"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + } + name: "disk_reads_completed_total" + } + disk_written_bytes_total: { + description: "The accumulated number of bytes written out." + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["disk"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + } + name: "disk_written_bytes_total" + } + disk_writes_completed_total: { + description: "The accumulated number of write operations completed." + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["disk"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + } + name: "disk_writes_completed_total" + } + filesystem_free_bytes: { + description: "The number of bytes free on the named filesystem." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["filesystem"] + required: true + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + filesystem: { + name: "filesystem" + description: "The name of the filesystem type." 
+ required: true + examples: ["ext4", "ntfs"] + } + } + name: "filesystem_free_bytes" + } + filesystem_total_bytes: { + description: "The total number of bytes in the named filesystem." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["filesystem"] + required: true + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + filesystem: { + name: "filesystem" + description: "The name of the filesystem type." + required: true + examples: ["ext4", "ntfs"] + } + } + name: "filesystem_total_bytes" + } + filesystem_used_bytes: { + description: "The number of bytes used on the named filesystem." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["filesystem"] + required: true + } + device: { + name: "device" + description: "The disk device name." + required: true + examples: ["sda", "sda1", "dm-1"] + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + filesystem: { + name: "filesystem" + description: "The name of the filesystem type." + required: true + examples: ["ext4", "ntfs"] + } + } + name: "filesystem_used_bytes" + } + load1: { + description: "System load averaged over the last 1 second." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["loadavg"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + relevant_when: "OS is not Windows" + name: "load1" + } + load5: { + description: "System load averaged over the last 5 seconds." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["loadavg"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + relevant_when: "OS is not Windows" + name: "load5" + } + load15: { + description: "System load averaged over the last 15 seconds." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["loadavg"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + relevant_when: "OS is not Windows" + name: "load15" + } + uptime: { + description: "The number of seconds since the last boot." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["host"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "uptime" + } + boot_time: { + description: "The UNIX timestamp of the last boot." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." 
+ examples: ["host"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "boot_time" + } + memory_active_bytes: { + description: "The number of bytes of active main memory." + relevant_when: "OS is not Windows" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_active_bytes" + } + memory_available_bytes: { + description: "The number of bytes of main memory available." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_available_bytes" + } + memory_buffers_bytes: { + description: "The number of bytes of main memory used by buffers." + relevant_when: "OS is Linux" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_buffers_bytes" + } + memory_cached_bytes: { + description: "The number of bytes of main memory used by cached blocks." + relevant_when: "OS is Linux" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_cached_bytes" + } + memory_free_bytes: { + description: "The number of bytes of main memory not used." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_free_bytes" + } + memory_inactive_bytes: { + description: "The number of bytes of main memory that is not active." + relevant_when: "OS is macOS X" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_inactive_bytes" + } + memory_shared_bytes: { + description: "The number of bytes of main memory shared between processes." + relevant_when: "OS is Linux" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." 
+ required: true + examples: ["my-host.local"] + } + } + name: "memory_shared_bytes" + } + memory_swap_free_bytes: { + description: "The number of free bytes of swap space." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_swap_free_bytes" + } + memory_swapped_in_bytes_total: { + description: "The number of bytes that have been swapped in to main memory." + relevant_when: "OS is not Windows" + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_swapped_in_bytes_total" + } + memory_swapped_out_bytes_total: { + description: "The number of bytes that have been swapped out from main memory." + relevant_when: "OS is not Windows" + default_namespace: "host" + type: "counter" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_swapped_out_bytes_total" + } + memory_swap_total_bytes: { + description: "The total number of bytes of swap space." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_swap_total_bytes" + } + memory_swap_used_bytes: { + description: "The number of used bytes of swap space." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_swap_used_bytes" + } + memory_total_bytes: { + description: "The total number of bytes of main memory." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_total_bytes" + } + memory_used_bytes: { + description: "The number of bytes of main memory used by programs or caches." + relevant_when: "OS is Linux" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_used_bytes" + } + memory_wired_bytes: { + description: "The number of wired bytes of main memory." 
+ relevant_when: "OS is macOS X" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["memory"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + } + name: "memory_wired_bytes" + } + network_receive_bytes_total: { + description: "The number of bytes received on this interface." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_receive_bytes_total" + } + network_receive_errs_total: { + description: "The number of errors encountered during receives on this interface." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_receive_errs_total" + } + network_receive_packets_total: { + description: "The number of packets received on this interface." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_receive_packets_total" + } + network_transmit_bytes_total: { + description: "The number of bytes transmitted on this interface." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_transmit_bytes_total" + } + network_transmit_errs_total: { + description: "The number of errors encountered during transmits on this interface." + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." 
+ required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_transmit_errs_total" + } + network_transmit_packets_drop_total: { + description: "The number of packets dropped during transmits on this interface." + relevant_when: "OS is not macOS" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_transmit_packets_drop_total" + } + network_transmit_packets_total: { + description: "The number of packets transmitted on this interface." + relevant_when: "OS is not macOS" + default_namespace: "host" + type: "gauge" + tags: { + collector: { + name: "collector" + description: "Which collector this metric comes from." + examples: ["network"] + required: true + } + host: { + name: "host" + description: "The hostname of the originating system." + required: true + examples: ["my-host.local"] + } + device: { + name: "device" + description: "The network interface device name." + required: true + examples: ["eth0", "enp5s3"] + } + } + name: "network_transmit_packets_total" + } + } + } + telemetry: { + metrics: { + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `host_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + } + aws_s3: { + kind: "source" + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + strategy: { + common: false + description: "The strategy to use to consume objects from AWS S3." + name: "strategy" + required: false + warnings: [] + type: { + string: { + default: "sqs" + enum: { + sqs: "Consume S3 objects by polling for bucket notifications sent to an [AWS SQS queue](https://aws.amazon.com/sqs/)." + } + syntax: "literal" + } + } + } + compression: { + common: false + description: "The compression format of the S3 objects.." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "text" + enum: { + auto: "Vector will try to determine the compression format of the object from its: `Content-Encoding` metadata, `Content-Type` metadata, and key suffix (e.g. `.gz`). It will fallback to 'none' if it cannot determine the compression." + gzip: "GZIP format." + zstd: "ZSTD format." + none: "Uncompressed." + } + syntax: "literal" + } + } + } + multiline: { + common: false + category: "Multiline" + description: "Multiline parsing configuration. If not specified, multiline parsing is disabled." + name: "multiline" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + condition_pattern: { + description: "Condition regex pattern to look for. Exact behavior is configured via `mode`." 
+ name: "condition_pattern" + required: true + warnings: [] + sort: 3 + type: { + string: { + examples: ["^[\\s]+", "\\\\$", "^(INFO|ERROR) ", ";$"] + syntax: "regex" + } + } + } + mode: { + description: "Mode of operation, specifies how the `condition_pattern` is interpreted." + name: "mode" + required: true + warnings: [] + sort: 2 + type: { + string: { + enum: { + continue_through: "All consecutive lines matching this pattern are included in the group. The first line (the line that matched the start pattern) does not need to match the `ContinueThrough` pattern. This is useful in cases such as a Java stack trace, where some indicator in the line (such as leading whitespace) indicates that it is an extension of the preceding line." + continue_past: "All consecutive lines matching this pattern, plus one additional line, are included in the group. This is useful in cases where a log message ends with a continuation marker, such as a backslash, indicating that the following line is part of the same message." + halt_before: "All consecutive lines not matching this pattern are included in the group. This is useful where a log line contains a marker indicating that it begins a new message." + halt_with: "All consecutive lines, up to and including the first line matching this pattern, are included in the group. This is useful where a log line ends with a termination marker, such as a semicolon." + } + examples: ["continue_through", "continue_past", "halt_before", "halt_with"] + syntax: "literal" + } + } + } + start_pattern: { + description: "Start regex pattern to look for as a beginning of the message." + name: "start_pattern" + required: true + warnings: [] + sort: 1 + type: { + string: { + examples: ["^[^\\s]", "\\\\$", "^(INFO|ERROR) ", "[^;]$"] + syntax: "regex" + } + } + } + timeout_ms: { + description: "The maximum time to wait for the continuation. Once this timeout is reached, the buffered message is guaranteed to be flushed, even if incomplete." + name: "timeout_ms" + required: true + warnings: [] + sort: 4 + type: { + uint: { + examples: [1_000, 600_000] + unit: "milliseconds" + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + sqs: { + common: true + category: "Sqs" + description: "SQS strategy options. Required if strategy=`sqs`." + name: "sqs" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + poll_secs: { + common: true + description: "How often to poll the queue for new messages in seconds." + name: "poll_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + visibility_timeout_secs: { + common: false + description: "The visibility timeout to use for messages in secords. This controls how long a message is left unavailable when a Vector receives it. If a `vector` does not delete the message before the timeout expires, it will be made reavailable for another consumer; this can happen if, for example, the `vector` process crashes." 
+ name: "visibility_timeout_secs" + required: false + warnings: ["Should be set higher than the length of time it takes to process an individual message to avoid that message being reprocessed."] + type: { + uint: { + default: 300 + unit: "seconds" + } + } + } + delete_message: { + common: true + description: "Whether to delete the message once Vector processes it. It can be useful to set this to `false` to debug or during initial Vector setup." + name: "delete_message" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + queue_url: { + description: "The URL of the SQS queue to receieve bucket notifications from." + name: "queue_url" + required: true + warnings: [] + type: { + string: { + examples: ["https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_s3: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." 
+ name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file. (usually located at `~/.aws/config`) + 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). (usually located at `~/.aws/credentials`) + 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html). (will only work if running on an EC2 instance with an instance profile/role) + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + sub_sections: [{ + title: "Obtaining an access key" + body: """ + In general, we recommend using instance profiles/roles whenever possible. In + cases where this is not possible you can generate an AWS access key for any user + within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on + how to do this. Such created AWS access keys can be used via [`access_key_id`](#access_key_id) + and [`secret_access_key`](#secret_access_key) options. + """ + }, { + title: "Assuming roles" + body: """ + Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an + optional setting that is helpful for a variety of use cases, such as cross + account access. + """ + }] + } + events: { + #Subsection: { + title: string + body: string + } + name: "events" + title: "Handling events from the `aws_s3` source" + body: """ + This source behaves very similarly to the `file` source in that + it will output one event per line (unless the `multiline` + configuration option is used). + + You will commonly want to use [transforms][urls.vector_transforms] to + parse the data. 
For example, to parse VPC flow logs sent to S3 you can + chain the `tokenizer` transform: + + ```toml + [transforms.flow_logs] + \ttype = "tokenizer" # required + \tinputs = ["s3"] + \tfield_names = ["version", "account_id", "interface_id", "srcaddr", "dstaddr", "srcport", "dstport", "protocol", "packets", "bytes", "start", "end", "action", "log_status"] + + \ttypes.srcport = "int" + \ttypes.dstport = "int" + \ttypes.packets = "int" + \ttypes.bytes = "int" + \ttypes.start = "timestamp|%s" + \ttypes.end = "timestamp|%s" + ``` + + To parse AWS load balancer logs, the `regex_parser` transform can be used: + + ```toml + [transforms.elasticloadbalancing_fields_parsed] + \ttype = "regex_parser" + \tinputs = ["s3"] + \tregex = '(?x)^ + \t\t\t(?P<type>[\\w]+)[ ] + \t\t\t(?P<timestamp>[\\w:.-]+)[ ] + \t\t\t(?P<elb>[^\\s]+)[ ] + \t\t\t(?P<client_host>[\\d.:-]+)[ ] + \t\t\t(?P<target_host>[\\d.:-]+)[ ] + \t\t\t(?P<request_processing_time>[\\d.-]+)[ ] + \t\t\t(?P<target_processing_time>[\\d.-]+)[ ] + \t\t\t(?P<response_processing_time>[\\d.-]+)[ ] + \t\t\t(?P<elb_status_code>[\\d-]+)[ ] + \t\t\t(?P<target_status_code>[\\d-]+)[ ] + \t\t\t(?P<received_bytes>[\\d-]+)[ ] + \t\t\t(?P<sent_bytes>[\\d-]+)[ ] + \t\t\t"(?P<request_method>[\\w-]+)[ ] + \t\t\t(?P<request_url>[^\\s]+)[ ] + \t\t\t(?P<request_protocol>[^"\\s]+)"[ ] + \t\t\t"(?P<user_agent>[^"]+)"[ ] + \t\t\t(?P<ssl_cipher>[^\\s]+)[ ] + \t\t\t(?P<ssl_protocol>[^\\s]+)[ ] + \t\t\t(?P<target_group_arn>[\\w.:/-]+)[ ] + \t\t\t"(?P<trace_id>[^\\s"]+)"[ ] + \t\t\t"(?P<domain_name>[^\\s"]+)"[ ] + \t\t\t"(?P<chosen_cert_arn>[\\w:./-]+)"[ ] + \t\t\t(?P<matched_rule_priority>[\\d-]+)[ ] + \t\t\t(?P<request_creation_time>[\\w.:-]+)[ ] + \t\t\t"(?P<actions_executed>[\\w,-]+)"[ ] + \t\t\t"(?P<redirect_url>[^"]+)"[ ] + \t\t\t"(?P<error_reason>[^"]+)"' + \tfield = "message" + \tdrop_failed = false + + \ttypes.received_bytes = "int" + \ttypes.request_processing_time = "float" + \ttypes.sent_bytes = "int" + \ttypes.target_processing_time = "float" + \ttypes.response_processing_time = "float" + + [transforms.elasticloadbalancing_url_parsed] + \ttype = "regex_parser" + \tinputs = ["elasticloadbalancing_fields_parsed"] + \tregex = '^(?P<url_scheme>[\\w]+)://(?P<url_hostname>[^\\s:/?#]+)(?::(?P<port>[\\d-]+))?-?(?:/(?P<path>[^\\s?#]*))?(?P<query>\\?[^\\s#]+)?' + \tfield = "request_url" + \tdrop_failed = false + ``` + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `aws_s3` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + title: "AWS S3" + features: { + multiline: { + enabled: true + } + collect: { + tls: { + enabled: false + } + checkpoint: { + enabled: false + } + from: { + service: { + name: "AWS S3" + thing: "an AWS S3 bucket" + url: "https://aws.amazon.com/s3/" + versions: null + description: "[Amazon Simple Storage Service (Amazon S3)](https://aws.amazon.com/s3/) is a scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data and applications on Amazon Web Services. It is very commonly used to store log data." + connect_to: { + vector: { + logs: { + setup: [{ + title: "Create an AWS SQS queue" + description: "Create an AWS SQS queue for Vector to consume bucket notifications from." + detour: { + url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-create-queue.html" + } + }, { + title: "Publish S3 bucket notifications to the queue" + description: """ + Configure S3 to publish Bucket notifications to your previously created SQS queue.
+ Ensure that it only publishes the following events: + + - PUT + - POST + - COPY + - Multipart upload completed + + These represent object creation events and ensure Vector does not double process + S3 objects. + """ + detour: { + url: "https://docs.aws.amazon.com/AmazonS3/latest/dev/ways-to-add-notification-config-to-bucket.html" + } + }, { + title: "Configure Vector" + description: """ + Using the SQS queue URL provided to you by AWS, configure the Vector `aws_s3` + source to use the SQS queue via the `sqs.queue_url` option. + """ + vector: { + configure: { + sources: { + aws_s3: { + type: "aws_s3" + sqs: { + queue_url: "" + } + } + } + } + } + }] + } + } + } + } + } + } + descriptions: { + collect_context: "Enriches data with useful AWS S3 context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + tls_collect: "Securely collects data via Transport Layer Security (TLS)." + multiline: "Merges multi-line logs into one event." + } + } + classes: { + commonly_used: true + deployment_roles: ["aggregator"] + delivery: "at_least_once" + development: "beta" + egress_method: "stream" + stateful: false + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [""" + The AWS S3 source requires an SQS queue configured to receive S3 + bucket notifications for the desired S3 buckets. + """] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + output: { + logs: { + object: { + description: "A line from an S3 object." + name: "object" + fields: { + message: { + description: "A line from the S3 object." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["53.126.150.246 - - [01/Oct/2020:11:25:58 -0400] \"GET /disintermediate HTTP/2.0\" 401 20308"] + syntax: "literal" + } + } + } + timestamp: { + description: "The Last-Modified time of the object. Defaults to the current timestamp if this information is missing." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + bucket: { + description: "The bucket of the object the line came from." + name: "bucket" + required: true + warnings: [] + type: { + string: { + examples: ["my-bucket"] + syntax: "literal" + } + } + } + object: { + description: "The object the line came from." + name: "object" + required: true + warnings: [] + type: { + string: { + examples: ["AWSLogs/111111111111/vpcflowlogs/us-east-1/2020/10/26/111111111111_vpcflowlogs_us-east-1_fl-0c5605d9f1baf680d_20201026T1950Z_b1ea4a7a.log.gz"] + syntax: "literal" + } + } + } + region: { + description: "The AWS region the bucket is in."
+ name: "region" + required: true + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + } + } + } + } + type: "aws_s3" + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + strategy: null + compression: null + sqs: null + type: null + multiline: null + } + input: string + output: {} | {} | [{} | {}] | null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/s3/latest/APIReference/API_GetObject.html" + action: "s3:GetObject" + }] + }, { + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html" + required_when: "[`strategy`](#strategy) is set to `sqs`" + action: "sqs:ReceiveMessage" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_DeleteMessage.html" + required_when: "[`strategy`](#strategy) is set to `sqs` and [`delete_message`](#delete_message) is set to `true`" + action: "sqs:DeleteMessage" + }] + }] + } + telemetry: { + metrics: { + sqs_message_delete_failed_total: { + description: "The total number of failures to delete SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_delete_failed_total" + } + sqs_message_delete_succeeded_total: { + description: "The total number of successful deletions of SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_delete_succeeded_total" + } + sqs_message_processing_failed_total: { + description: "The total number of failures to process SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_processing_failed_total" + } + sqs_message_processing_succeeded_total: { + description: "The total number of SQS messages successfully processed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_processing_succeeded_total" + } + sqs_message_receive_failed_total: { + description: "The total number of failures to receive SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_receive_failed_total" + } + sqs_message_receive_succeeded_total: { + description: "The total number of times successfully receiving SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_receive_succeeded_total" + } + sqs_message_received_messages_total: { + description: "The total number of received SQS messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_message_received_messages_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + sqs_s3_event_record_ignored_total: { + description: "The total number of times an S3 record in an SQS message was ignored (for an event that was not `ObjectCreated`)." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + ignore_type: { + name: "ignore_type" + description: "The reason for ignoring the S3 record" + required: true + enum: { + invalid_event_kind: "The kind of invalid event." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "sqs_s3_event_record_ignored_total" + } + } + } + } + socket: { + kind: "source" + title: "Socket" + classes: { + commonly_used: true + delivery: "best_effort" + deployment_roles: ["aggregator", "sidecar"] + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "socket client" + thing: "a socket client" + url: "https://en.wikipedia.org/wiki/Network_socket" + versions: null + } + interface: { + socket: { + direction: "incoming" + port: 9000 + protocols: ["tcp", "unix", "udp"] + ssl: "optional" + } + } + } + receive_buffer_bytes: { + enabled: true + relevant_when: "mode = `tcp` or mode = `udp` && os = `unix`" + } + keepalive: { + enabled: true + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful socket client context." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to listen for connections on, or `systemd#N` to use the Nth socket passed by systemd socket activation. If an address is used it _must_ include a port." + name: "address" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:9000", "systemd", "systemd#3"] + syntax: "literal" + } + } + } + host_key: { + category: "Context" + common: false + description: "The key name added to each event representing the current host. This can also be globally set via the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: "host" + syntax: "literal" + } + } + } + max_length: { + common: true + description: "The maximum bytes size of incoming messages before they are discarded." 
+ name: "max_length" + required: false + warnings: [] + type: { + uint: { + default: 102400 + unit: "bytes" + } + } + } + mode: { + description: "The type of socket to use." + name: "mode" + required: true + warnings: [] + type: { + string: { + enum: { + tcp: "TCP socket." + udp: "UDP socket." + unix_datagram: "Unix domain datagram socket." + unix_stream: "Unix domain stream socket." + } + examples: ["tcp", "udp", "unix_datagram", "unix_stream"] + syntax: "literal" + } + } + } + path: { + description: "The unix socket path. *This should be an absolute path*." + name: "path" + relevant_when: "mode = `unix`" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/socket"] + syntax: "literal" + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the source." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." 
+ name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + shutdown_timeout_secs: { + common: false + description: "The timeout before a connection is forcefully closed during shutdown." + name: "shutdown_timeout_secs" + relevant_when: "mode = `tcp``" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + socket: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "socket" + #ExampleConfig: { + title: string + configuration: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + logs: { + line: { + description: "A single socket event." + name: "line" + fields: { + host: { + description: "The local hostname, equivalent to the `gethostname` command." + name: "host" + required: true + warnings: [] + type: { + string: { + examples: ["my-host.local"] + syntax: "literal" + } + } + } + message: { + description: "The raw line, unparsed." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["2019-02-13T19:48:34+00:00 [info] Started GET \"/\" for 127.0.0.1"] + syntax: "literal" + } + } + } + timestamp: { + description: "The exact time the event was ingested into Vector." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + examples: [{ + title: "Socket line" + configuration: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + input: """ + ```text + 2019-02-13T19:48:34+00:00 [info] Started GET "/" for 127.0.0.1 + ``` + """ + output: { + log: { + timestamp: "2020-10-10T17:07:36.452332Z" + message: "2019-02-13T19:48:34+00:00 [info] Started GET \"/\" for 127.0.0.1" + host: "my-host.local" + } + } + }] + telemetry: { + metrics: { + connection_errors_total: { + description: "The total number of connection errors for this Vector instance." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + connection_failed_total: { + description: "The total number of times a connection has failed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_failed_total" + } + connection_established_total: { + description: "The total number of times a connection has been established." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_established_total" + } + connection_send_errors_total: { + description: "The total number of errors sending data via the connection." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_send_errors_total" + } + connection_shutdown_total: { + description: "The total number of times the connection has been shut down." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_shutdown_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `socket` source will augment events with helpful + context keys as shown in the "Output" section. 
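Taken together, the `tls.*` options documented above follow a common pattern: `enabled` switches TLS on, after which an identity certificate must be supplied via `crt_file` (plus `key_file` when the certificate is not a PKCS#12 archive). A hedged sketch of a TLS-enabled socket source, reusing the documented example certificate paths; the component ID `secure_socket` and the listen address are hypothetical:

```cue
sources: secure_socket: {
	type:    "socket"
	mode:    "tcp"
	address: "0.0.0.0:9000" // hypothetical listen address
	tls: {
		enabled:            true
		crt_file:           "/path/to/host_certificate.crt"
		key_file:           "/path/to/host_certificate.key"
		verify_certificate: false // do not demand client certificates
	}
}
```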
+ """ + } + } + } + prometheus_remote_write: { + kind: "source" + title: "Prometheus Remote Write" + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["daemon", "sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + multiline: { + enabled: false + } + receive: { + from: { + service: { + name: "Prometheus" + thing: "a Prometheus database" + url: "https://prometheus.io/" + versions: null + description: "[Prometheus](https://prometheus.io/) is a pull-based monitoring system that scrapes metrics from configured endpoints, stores them efficiently, and supports a powerful query language to compose dynamic information from a variety of otherwise unrelated data points." + } + interface: { + socket: { + api: { + title: "Prometheus Remote Write" + url: "https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write" + } + direction: "incoming" + port: 9090 + protocols: ["http"] + ssl: "optional" + } + } + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + } + descriptions: { + receive_context: "Enriches data with useful Prometheus context." + tls_receive: "Securely receives data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + address: { + description: "The address to accept connections on. The address _must_ include a port." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:9090"] + syntax: "literal" + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an in-line CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: false + description: "Absolute path to a certificate file used to identify this server, in DER or PEM format (X.509) or PKCS#12, or an in-line certificate in PEM format. If this is set, and is not a PKCS#12 archive, `key_file` must also be set. This is required if `enabled` is set to `true`." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: false + description: "Absolute path to a private key file used to identify this server, in DER or PEM format (PKCS#8), or an in-line private key in PEM format." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: false + description: "Require TLS for incoming connections. If this is set, an identity certificate is also required." 
+ name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true`, Vector will require a TLS certificate from the connecting host and terminate the connection if the certificate is not valid. If `false` (the default), Vector will not request a certificate from the client." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + auth: { + common: false + category: "Auth" + description: "Options for HTTP Basic Authentication." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + username: { + description: "The basic authentication user name." + name: "username" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + prometheus_remote_write: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "prometheus_remote_write" + #ExampleConfig: { + title: string + configuration: { + address: null + auth: null + type: null + tls: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + counter: { + description: """ + A single value that can only be incremented + or reset to zero; it cannot be + decremented. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "counter" + type: "counter" + default_namespace: "vector" + } + gauge: { + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "gauge" + type: "gauge" + default_namespace: "vector" + } + } + } + how_it_works: { + metric_types: { + #Subsection: { + title: string + body: string + } + name: "metric_types" + title: "Metric type interpretation" + body: """ + The remote_write protocol used by this source transmits + only the metric tags, timestamp, and numerical value. No + explicit information about the original type of the + metric (e.g. counter, histogram) is included. As + such, this source makes a guess as to what the original + metric type was. + + For metrics named with a suffix of `_total`, this source + emits the value as a counter metric. All other metrics + are emitted as gauges.
+ """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `prometheus_remote_write` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + http_error_response_total: { + description: "The total number of HTTP error responses for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_error_response_total" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "parse_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
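The type-guessing rule described in "Metric type interpretation" above is mechanical enough to state as code. A hedged sketch in CUE; the `#GuessType` definition is invented for illustration and is not vector's actual implementation:

```cue
// Sketch of the documented rule: names ending in `_total` become
// counters, everything else becomes a gauge.
#GuessType: {
	name: string
	kind: *"gauge" | "counter"
	if name =~ "_total$" {
		kind: "counter"
	}
}

requests: #GuessType & {name: "http_requests_total"} // kind: "counter"
memory:   #GuessType & {name: "memory_used_bytes"}   // kind: "gauge"
```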
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + requests_completed_total: { + description: "The total number of requests completed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_completed_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + request_duration_nanoseconds: { + description: "The total request duration in nanoseconds." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_duration_nanoseconds" + } + } + } + } + aws_ecs_metrics: { + kind: "source" + title: "AWS ECS Metrics" + description: """ + Collects the docker container stats for tasks running in AWS ECS or AWS + Fargate. + """ + classes: { + commonly_used: false + delivery: "at_least_once" + deployment_roles: ["sidecar"] + development: "beta" + egress_method: "batch" + stateful: false + } + features: { + collect: { + checkpoint: { + enabled: false + } + from: { + service: { + name: "Amazon ECS" + thing: "an Amazon ECS container" + url: "https://aws.amazon.com/ecs/" + versions: null + } + interface: { + socket: { + api: { + title: "Amazon ECS task metadata endpoint" + url: "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "disabled" + } + } + } + } + multiline: { + enabled: false + } + descriptions: { + collect_context: "Enriches data with useful Amazon ECS context." + checkpoint: "Efficiently collects data and checkpoints read positions to ensure data is not lost between restarts." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + installation: { + platform_name: null + } + configuration: { + endpoint: { + description: """ + Base URI of the task metadata endpoint. + If empty, the URI will be automatically discovered based on the latest version detected. + The version 2 endpoint base URI is `169.254.170.2/v2/`. + The version 3 endpoint base URI is stored in the environment variable `ECS_CONTAINER_METADATA_URI`. + The version 4 endpoint base URI is stored in the environment variable `ECS_CONTAINER_METADATA_URI_V4`. + """ + common: false + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "${ECS_CONTAINER_METADATA_URI_V4}" + syntax: "literal" + } + } + } + namespace: { + description: "The namespace of the metric. Disabled if empty." + common: true + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "awsecs" + syntax: "literal" + } + } + } + scrape_interval_secs: { + description: "The interval between scrapes, in seconds." 
+ common: true + name: "scrape_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 15 + unit: "seconds" + } + } + } + version: { + description: """ + \tThe version of the metadata endpoint. + \tIf empty, the version will be automatically discovered based on environment variables. + """ + common: false + name: "version" + required: false + warnings: [] + type: { + string: { + default: "v4" + enum: { + v4: "When the environment variable `ECS_CONTAINER_METADATA_URI_V4` is defined." + v3: "When the v4 check fails but the environment variable `ECS_CONTAINER_METADATA_URI` is defined." + v2: "When both the v4 and v3 checks fail." + } + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_ecs_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "aws_ecs_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + namespace: null + scrape_interval_secs: null + version: null + type: null + } + input: string + output: {} | {} | [{} | {}] | null + } + output: { + metrics: { + blkio_recursive_io_merged_total: { + description: "Total number of bios/requests merged into requests." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_merged_total" + } + blkio_recursive_io_queued_total: { + description: "Total number of requests queued up at any given instant." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_queued_total" + } + blkio_recursive_io_service_bytes_total: { + description: "Number of bytes transferred to/from the disk." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container."
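The `version` fallback order documented above (prefer v4, then v3, then v2) can be illustrated as a small CUE computation. The `#DiscoverVersion` definition and its boolean inputs are hypothetical stand-ins for checking whether the two environment variables are defined:

```cue
// Hypothetical sketch of the documented discovery order.
#DiscoverVersion: {
	v4_defined: bool // ECS_CONTAINER_METADATA_URI_V4 set?
	v3_defined: bool // ECS_CONTAINER_METADATA_URI set?

	version: [
		if v4_defined {"v4"},
		if v3_defined {"v3"},
		"v2",
	][0]
}

example: #DiscoverVersion & {v4_defined: false, v3_defined: true}
// example.version: "v3"
```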
+ required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_service_bytes_total" + } + blkio_recursive_io_service_time_seconds_total: { + description: "Total amount of time in seconds between request dispatch and request completion for the IOs done." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_service_time_seconds_total" + } + blkio_recursive_io_serviced_total: { + description: "Number of IOs completed to/from the disk." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_serviced_total" + } + blkio_recursive_io_time_seconds_total: { + description: "Disk time allocated per device in seconds." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_time_seconds_total" + } + blkio_recursive_io_wait_time_seconds_total: { + description: "Total amount of time in seconds the IOs spent waiting in the scheduler queues for service." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." 
+ required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_io_wait_time_seconds_total" + } + blkio_recursive_sectors_total: { + description: "Number of sectors transferred to/from disk." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "Device identified by its major and minor numbers." + required: true + examples: ["202:26368"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + op: { + name: "op" + description: "The operation type." + required: true + examples: ["read", "write", "sync", "async", "total"] + } + } + name: "blkio_recursive_sectors_total" + } + cpu_online_cpus: { + description: "Number of CPU cores." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_online_cpus" + } + cpu_usage_system_jiffies_total: { + description: "Jiffies of CPU time used by the system." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_usage_system_jiffies_total" + } + cpu_usage_usermode_jiffies_total: { + description: "Jiffies of CPU time spent in user mode by the container." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_usage_usermode_jiffies_total" + } + cpu_usage_kernelmode_jiffies_total: { + description: "Jiffies of CPU time spent in kernel mode by the container." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_usage_kernelmode_jiffies_total" + } + cpu_usage_total_jiffies_total: { + description: "Jiffies of CPU time used by the container." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." 
+ required: true + examples: ["myapp"] + } + } + name: "cpu_usage_total_jiffies_total" + } + cpu_throttling_periods_total: { + description: "Number of periods." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_throttling_periods_total" + } + cpu_throttled_periods_total: { + description: "Number of periods throttled." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_throttled_periods_total" + } + cpu_throttled_time_seconds_total: { + description: "Throttling time in seconds." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_throttled_time_seconds_total" + } + cpu_usage_percpu_jiffies_total: { + description: "Jiffies of CPU time used by the container, per CPU core." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + cpu: { + name: "cpu" + description: "CPU core identifier." + required: true + examples: ["0", "1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "cpu_usage_percpu_jiffies_total" + } + memory_used_bytes: { + description: "Memory used by the container, in bytes." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_used_bytes" + } + memory_max_used_bytes: { + description: "Maximum measured memory usage of the container, in bytes." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_max_used_bytes" + } + memory_limit_bytes: { + description: "Memory usage limit of the container, in bytes." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." 
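The jiffies counters above are raw scheduler ticks, so they are mainly useful as rates. As an aside, one common way to derive a utilization percentage from them, shown with assumed sample deltas; this mirrors the formula `docker stats` applies to the same underlying counters and is not something the source computes itself:

```cue
// Assumed sample values between two scrapes; only the formula is real.
delta_container: 4.0e7 // Δ cpu_usage_total_jiffies_total
delta_system:    8.0e8 // Δ cpu_usage_system_jiffies_total
online_cpus:     4     // cpu_online_cpus

cpu_percent: delta_container/delta_system*online_cpus*100 // 20.0
```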
+ required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_limit_bytes" + } + memory_active_anonymous_bytes: { + description: "Amount of memory that has been identified as active by the kernel. Anonymous memory is memory that is not linked to disk pages." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_active_anonymous_bytes" + } + memory_active_file_bytes: { + description: "Amount of active file cache memory. Cache memory = active_file + inactive_file + tmpfs." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_active_file_bytes" + } + memory_cache_bytes: { + description: "The amount of memory used by the processes of this cgroup that can be associated with a block on a block device. Also accounts for memory used by tmpfs." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_cache_bytes" + } + memory_dirty_bytes: { + description: "The amount of memory waiting to get written to disk." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_dirty_bytes" + } + memory_inactive_anonymous_bytes: { + description: "Amount of memory that has been identified as inactive by the kernel." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_inactive_anonymous_bytes" + } + memory_inactive_file_bytes: { + description: "Amount of inactive file cache memory." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." 
+ required: true + examples: ["myapp"] + } + } + name: "memory_inactive_file_bytes" + } + memory_mapped_file_bytes: { + description: "Indicates the amount of memory mapped by the processes in the cgroup. It doesn’t give you information about how much memory is used; rather, it tells you how it is used." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_mapped_file_bytes" + } + memory_page_faults_total: { + description: "Number of times that a process of the cgroup triggered a page fault." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_page_faults_total" + } + memory_major_faults_total: { + description: "Number of times that a process of the cgroup triggered a major page fault." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_major_faults_total" + } + memory_page_charged_total: { + description: "Number of charging events to the memory cgroup. Charging events happen each time a page is accounted as either mapped anon page (RSS) or cache page to the cgroup." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_page_charged_total" + } + memory_page_uncharged_total: { + description: "Number of uncharging events to the memory cgroup. Uncharging events happen each time a page is unaccounted from the cgroup." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_page_uncharged_total" + } + memory_rss_bytes: { + description: "The amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container."
+ required: true + examples: ["myapp"] + } + } + name: "memory_rss_bytes" + } + memory_rss_hugepages_bytes: { + description: "Amount of memory due to anonymous transparent hugepages." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_rss_hugepages_bytes" + } + memory_unevictable_bytes: { + description: "The amount of memory that cannot be reclaimed." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_unevictable_bytes" + } + memory_writeback_bytes: { + description: "The amount of memory from file/anon cache that is queued for syncing to the disk." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_writeback_bytes" + } + memory_total_active_anonymous_bytes: { + description: "Total amount of memory that has been identified as active by the kernel." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_active_anonymous_bytes" + } + memory_total_active_file_bytes: { + description: "Total amount of active file cache memory." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_active_file_bytes" + } + memory_total_cache_bytes: { + description: "Total amount of memory used by the processes of this cgroup that can be associated with a block on a block device." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_cache_bytes" + } + memory_total_dirty_bytes: { + description: "Total amount of memory waiting to get written to disk." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container."
+ required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_dirty_bytes" + } + memory_total_inactive_anonymous_bytes: { + description: "Total amount of memory that has been identified as inactive by the kernel." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_inactive_anonymous_bytes" + } + memory_total_inactive_file_bytes: { + description: "Total amount of inactive file cache memory." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_inactive_file_bytes" + } + memory_total_mapped_file_bytes: { + description: "Total amount of memory mapped by the processes in the cgroup." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_mapped_file_bytes" + } + memory_total_page_faults_total: { + description: "Total number of page faults." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_page_faults_total" + } + memory_total_major_faults_total: { + description: "Total number of major page faults." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_major_faults_total" + } + memory_total_page_charged_total: { + description: "Total number of charging events." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_page_charged_total" + } + memory_total_page_uncharged_total: { + description: "Total number of uncharging events." 
+ default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_page_uncharged_total" + } + memory_total_rss_bytes: { + description: "Total amount of memory that doesn’t correspond to anything on disk: stacks, heaps, and anonymous memory maps." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_rss_bytes" + } + memory_total_rss_hugepages_bytes: { + description: "Total amount of memory due to anonymous transparent hugepages." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_rss_hugepages_bytes" + } + memory_total_unevictable_bytes: { + description: "Total amount of memory that cannot be reclaimed." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_unevictable_bytes" + } + memory_total_writeback_bytes: { + description: "Total amount of memory from file/anon cache that is queued for syncing to the disk." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_total_writeback_bytes" + } + memory_hierarchical_memory_limit_bytes: { + description: "The memory limit imposed by the cgroup hierarchy." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_hierarchical_memory_limit_bytes" + } + memory_hierarchical_memsw_limit_bytes: { + description: "The memory + swap limit imposed by the cgroup hierarchy." + default_namespace: "awsecs" + type: "gauge" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container."
+ required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "memory_hierarchical_memsw_limit_bytes" + } + network_receive_bytes_total: { + description: "Bytes received by the container via the network interface." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_receive_bytes_total" + } + network_receive_packets_total: { + description: "Number of packets received by the container via the network interface." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_receive_packets_total" + } + network_receive_packets_drop_total: { + description: "Number of inbound packets dropped by the container." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_receive_packets_drop_total" + } + network_receive_errs_total: { + description: "Errors receiving packets." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_receive_errs_total" + } + network_transmit_bytes_total: { + description: "Bytes sent by the container via the network interface." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." 
+ required: true + examples: ["myapp"] + } + } + name: "network_transmit_bytes_total" + } + network_transmit_packets_total: { + description: "Number of packets sent by the container via the network interface." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_transmit_packets_total" + } + network_transmit_packets_drop_total: { + description: "Number of outbound packets dropped by the container." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_transmit_packets_drop_total" + } + network_transmit_errs_total: { + description: "Errors sending packets." + default_namespace: "awsecs" + type: "counter" + tags: { + container_id: { + name: "container_id" + description: "The identifier of the ECS container." + required: true + examples: ["0cf54b87-f0f0-4044-b9d6-20dc54d5c414-4057181352"] + } + device: { + name: "device" + description: "The network interface." + required: true + examples: ["eth1"] + } + container_name: { + name: "container_name" + description: "The name of the ECS container." + required: true + examples: ["myapp"] + } + } + name: "network_transmit_errs_total" + } + } + } + telemetry: { + metrics: { + http_error_response_total: { + description: "The total number of HTTP error responses for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_error_response_total" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + parse_errors_total: { + description: "The total number of errors parsing metrics for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "parse_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + requests_completed_total: { + description: "The total number of requests completed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_completed_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + request_duration_nanoseconds: { + description: "The total request duration in nanoseconds." + type: "histogram" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "request_duration_nanoseconds" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `aws_ecs_metrics` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + } + } + transforms: { + filter: { + kind: "transform" + output: {} + title: "Filter" + description: "Filters events based on a set of conditions." + classes: { + commonly_used: true + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + filter: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + condition: { + description: """ + The condition to be matched against every input event. Only messages that pass the condition will + be forwarded. + """ + name: "condition" + required: true + warnings: [] + type: { + string: { + examples: [#".status_code != 200 && !includes(["info", "debug"], .severity)"#] + syntax: "remap_boolean_expression" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." 
+ name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + filter: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "filter" + #ExampleConfig: { + title: string + configuration: { + condition: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Drop debug logs" + configuration: { + condition: '.level == "debug"' + type: null + inputs: null + } + input: [{ + log: { + level: "debug" + message: "I'm a noisy debug log" + } + }, { + log: { + level: "info" + message: "I'm a normal info log" + } + }] + output: [{ + log: { + level: "info" + message: "I'm a normal info log" + } + }] + }] + telemetry: { + metrics: { + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "events_discarded_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + grok_parser: { + kind: "transform" + output: {} + title: "Grok Parser" + description: "Parses a log field value with [Grok](https://grokdebug.herokuapp.com/)." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + parse: { + format: { + name: "Grok" + url: "https://grokdebug.herokuapp.com/" + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. 
+ + ```vrl + .message = parse_grok(.message, "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}") + ``` + """] + notices: [""" + Vector uses the Rust [`grok` library](https://github.com/daschl/grok). All patterns + [listed here](https://github.com/daschl/grok/tree/master/patterns) are supported. It is recommended to use + maintained patterns when possible since they will be improved over time by + the community. + """] + } + configuration: { + drop_field: { + common: true + description: "If `true`, the specified `field` will be dropped after parsing." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + field: { + common: true + description: "The log field to execute the `pattern` against. Must be a `string` value." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + pattern: { + description: "The [Grok pattern](https://github.com/daschl/grok/tree/master/patterns)." + name: "pattern" + required: true + warnings: [] + type: { + string: { + examples: ["%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}"] + syntax: "literal" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/New_York", "EST5EDT"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + types: { + common: true + category: "Types" + description: """ + Key/value pairs representing mapped log field names and types. This is used to + coerce log fields from strings into their proper types. The available types are + listed in the **Types** list below. + + Timestamp coercions need to be prefaced with `timestamp|`, for example + `"timestamp|%F"`. Timestamp specifiers can use either of the following: + + 1. One of the built-in formats listed in the **Timestamp Formats** table below. + 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's + `chrono` library.
+ + ### Types + + * `array` + * `bool` + * `bytes` + * `float` + * `int` + * `map` + * `null` + * `timestamp` (see the table below for formats) + + ### Timestamp Formats + + Format | Description | Example + :------|:------------|:------- + `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54` + `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54` + `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54` + `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54` + `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020` + `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020` + `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274` + `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z` + `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) with time zone offset | `2020-12-01T02:37:54-07:00` + `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020` + `%a %d %b %T %z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020` + `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020` + + **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time. + """ + name: "types" + required: false + warnings: [] + type: { + object: { + examples: [{ + status: "int" + duration: "float" + success: "bool" + timestamp_iso8601: "timestamp|%F" + timestamp_custom: "timestamp|%a %b %e %T %Y" + timestamp_unix: "timestamp|%s" + parent: { + child: "int" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + grok_parser: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "grok_parser" + #ExampleConfig: { + title: string + configuration: { + drop_field: null + field: null + pattern: null + timezone: null + types: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + available_patterns: { + #Subsection: { + title: string + body: string + } + name: "available_patterns" + title: "Available Patterns" + body: """ + Vector uses the Rust [`grok` library](https://github.com/daschl/grok). All patterns + [listed here](https://github.com/daschl/grok/tree/master/patterns) are supported. It is recommended to use + maintained patterns when possible since they will be improved over time by + the community. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input."
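+ The `timestamp|` coercions documented above pair the `timestamp` type with a [`chrono`](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) format string. As a minimal illustrative sketch (the field names here are hypothetical and not part of the vector output), such a `types` table is plain CUE data and can be checked standalone with `cue eval`:
+
+ ```cue
+ // Hypothetical coercion table: plain types from the Types list above,
+ // plus timestamp coercions prefixed with "timestamp|" using formats
+ // from the Timestamp Formats table.
+ types: {
+ 	status:    "int"             // "201"                 -> 201
+ 	duration:  "float"           // "0.25"                -> 0.25
+ 	success:   "bool"            // "true"                -> true
+ 	logged_at: "timestamp|%FT%T" // "2020-12-01T02:37:54" -> timestamp
+ 	epoch:     "timestamp|%s"    // "1606790274"          -> timestamp
+ }
+ ```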
+ } + testing: { + #Subsection: { + title: string + body: string + } + name: "testing" + title: "Testing" + body: "We recommend the [Grok debugger](https://grokdebug.herokuapp.com/) for Grok testing." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + key_value_parser: { + kind: "transform" + output: {} + title: "Key-value Parser" + description: "Loosely parses a log field's value in key-value format." 
+ classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + parse: { + format: { + name: "KeyValue" + url: null + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = parse_key_value(.message) + ``` + """] + notices: [""" + It is likely that the `key_value` transform will replace the `logfmt_parser` transform + in the future since it offers a more flexible superset of that transform. + """] + } + configuration: { + drop_field: { + common: true + description: "If `true`, the specified `field` will be dropped after parsing." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + field: { + common: true + description: "The log field containing key/value pairs to parse. Must be a `string` value." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + field_split: { + common: false + description: "The character(s) to split a key/value pair on, which results in a new field with an associated value. Must be a `string` value." + name: "field_split" + required: false + warnings: [] + type: { + string: { + default: "=" + examples: [":", "="] + syntax: "literal" + } + } + } + overwrite_target: { + common: false + description: """ + If `target_field` is set and the log contains a field of the same name + as the target, it will only be overwritten if this is set to `true`. + """ + name: "overwrite_target" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + separator: { + common: false + description: "The character(s) that separate key/value pairs. Must be a `string` value." + name: "separator" + required: false + warnings: [] + type: { + string: { + default: "[whitespace]" + examples: [",", ";", "|"] + syntax: "literal" + } + } + } + target_field: { + common: false + description: """ + If this setting is present, the parsed key/value pairs will be inserted into the + log as a sub-object with this name. + If a field with the same name already exists, the parser will fail and + produce an error. + """ + name: "target_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["root_field", "parent.child"] + syntax: "literal" + } + } + } + trim_key: { + common: false + description: """ + Removes characters from the beginning and end of a key until a character that is not listed. + ex: `<key>=value` would result in `key: value` with this option set to `<>`.
+ """ + name: "trim_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["<>", "{}"] + syntax: "literal" + } + } + } + trim_value: { + common: false + description: """ + Removes characters from the beginning and end of a value until a character that is not listed. + ex: `key=<<>value>>` would result in `key: value` with this option set to `<>`. + """ + name: "trim_value" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["<>", "{}"] + syntax: "literal" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/NewYork", "EST5EDT"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + types: { + common: true + category: "Types" + description: """ + Key/value pairs representing mapped log field names and types. This is used to + coerce log fields from strings into their proper types. The available types are + listed in the **Types** list below. + + Timestamp coercions need to be prefaced with `timestamp|`, for example + `"timestamp|%F"`. Timestamp specifiers can use either of the following: + + 1. One of the built-in-formats listed in the **Timestamp Formats** table below. + 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's + `chrono` library. 
+ + ### Types + + * `array` + * `bool` + * `bytes` + * `float` + * `int` + * `map` + * `null` + * `timestamp` (see the table below for formats) + + ### Timestamp Formats + + Format | Description | Example + :------|:------------|:------- + `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54` + `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54` + `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54` + `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54` + `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020` + `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020` + `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274` + `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z` + `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) with time zone offset | `2020-12-01T02:37:54-07:00` + `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020` + `%a %d %b %T %z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020` + `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020` + + **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time. + """ + name: "types" + required: false + warnings: [] + type: { + object: { + examples: [{ + status: "int" + duration: "float" + success: "bool" + timestamp_iso8601: "timestamp|%F" + timestamp_custom: "timestamp|%a %b %e %T %Y" + timestamp_unix: "timestamp|%s" + parent: { + child: "int" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + key_value_parser: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "key_value_parser" + #ExampleConfig: { + title: string + configuration: { + drop_field: null + field: null + field_split: null + overwrite_target: null + separator: null + target_field: null + trim_key: null + trim_value: null + timezone: null + types: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + description: { + #Subsection: { + title: string + body: string + } + name: "description" + title: "Description" + body: """ + The Key Value Parser accepts structured data that can be split on a character, or group of characters, and extracts it into a + JSON object (dictionary) of key/value pairs. The `separator` option allows you to define the character(s) to perform the initial + splitting of the message into pairs. The `field_split` option allows you to define the character(s) which split the key from the value.
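+ A minimal sketch of that interplay, written as CUE configuration data (the component IDs and message content here are hypothetical, not taken from the vector output):
+
+ ```cue
+ // With a message such as "a:1; b:2", `separator` ";" first splits the
+ // string into the pairs "a:1" and "b:2"; `field_split` ":" then splits
+ // each pair into a key and its value.
+ transforms: kv: {
+ 	type:        "key_value_parser"
+ 	inputs:      ["my-source-id"] // hypothetical upstream ID
+ 	field:       "message"
+ 	separator:   ";"
+ 	field_split: ":"
+ }
+ ```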
+ """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + examples: [{ + title: "Firewall log message" + configuration: { + field: "message" + field_split: ":" + separator: ";" + target_field: "data" + trim_key: "\"" + trim_value: "\"" + drop_field: null + overwrite_target: null + timezone: null + types: null + type: "key_value_parser" + inputs: null + } + input: { + log: { + message: "action:\"Accept\"; flags:\"802832\"; ifdir:\"inbound\"; ifname:\"eth2-05\"; logid:\"6\"; loguid:\"{0x5f0fa4d6,0x1,0x696ac072,0xc28d839a}\";" + } + } + output: { + log: { + message: "action:\"Accept\"; flags:\"802832\"; ifdir:\"inbound\"; ifname:\"eth2-05\"; logid:\"6\"; loguid:\"{0x5f0fa4d6,0x1,0x696ac072,0xc28d839a}\";" + data: { + action: "Accept" + flags: "802832" + ifdir: "inbound" + ifname: "eth2-05" + logid: "6" + loguid: "{0x5f0fa4d6,0x1,0x696ac072,0xc28d839a}" + } + } + } + }] + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + split: { + kind: "transform" + output: {} + title: "Split" + description: "Splits a string field on a defined separator." 
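+ Because this transform is deprecated in favor of `remap` (see the warning below), a replacement is usually written in VRL. A minimal sketch as CUE configuration data, assuming hypothetical component IDs and field names:
+
+ ```cue
+ // Hypothetical remap transform that splits .message on "," and assigns
+ // the leading tokens to named fields, mirroring the `field_names`
+ // option documented below.
+ transforms: split_replacement: {
+ 	type:   "remap"
+ 	inputs: ["my-source-id"] // hypothetical upstream ID
+ 	source: """
+ 		parts = split(string!(.message), ",")
+ 		.remote_addr = parts[0]
+ 		.user_id     = parts[1]
+ 		"""
+ }
+ ```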
+ classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = split(.message, " ") + ``` + """] + notices: [] + } + configuration: { + drop_field: { + common: true + description: "If `true`, the `field` will be dropped after parsing." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + field: { + common: true + description: "The field to apply the split on." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child"] + syntax: "literal" + } + } + } + field_names: { + description: "The field names assigned to the resulting tokens, in order." + name: "field_names" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["timestamp", "level", "message", "parent.child"] + syntax: "literal" + } + } + } + } + } + } + separator: { + common: true + description: "The separator to split the field on. If no separator is given, it will split on all whitespace. 'Whitespace' is defined according to the terms of the [Unicode Derived Core Property `White_Space`](https://en.wikipedia.org/wiki/Unicode_character_property#Whitespace)." + name: "separator" + required: false + warnings: [] + type: { + string: { + default: "[whitespace]" + examples: [","] + syntax: "literal" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/New_York", "EST5EDT"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + types: { + common: true + category: "Types" + description: """ + Key/value pairs representing mapped log field names and types. This is used to + coerce log fields from strings into their proper types.
The available types are + listed in the **Types** list below. + + Timestamp coercions need to be prefaced with `timestamp|`, for example + `"timestamp|%F"`. Timestamp specifiers can use either of the following: + + 1. One of the built-in formats listed in the **Timestamp Formats** table below. + 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's + `chrono` library. + + ### Types + + * `array` + * `bool` + * `bytes` + * `float` + * `int` + * `map` + * `null` + * `timestamp` (see the table below for formats) + + ### Timestamp Formats + + Format | Description | Example + :------|:------------|:------- + `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54` + `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54` + `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54` + `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54` + `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020` + `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020` + `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274` + `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z` + `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) with time zone offset | `2020-12-01T02:37:54-07:00` + `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020` + `%a %d %b %T %z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020` + `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020` + + **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time. + """ + name: "types" + required: false + warnings: [] + type: { + object: { + examples: [{ + status: "int" + duration: "float" + success: "bool" + timestamp_iso8601: "timestamp|%F" + timestamp_custom: "timestamp|%a %b %e %T %Y" + timestamp_unix: "timestamp|%s" + parent: { + child: "int" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + split: "The type of this component."
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "split" + #ExampleConfig: { + title: string + configuration: { + drop_field: null + field: null + field_names: null + separator: null + timezone: null + types: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Split log message" + configuration: { + field: "message" + separator: "," + field_names: ["remote_addr", "user_id", "timestamp", "message", "status", "bytes"] + drop_field: null + timezone: null + type: null + types: { + status: "int" + bytes: "int" + } + inputs: null + } + input: { + log: { + message: "5.86.210.12,zieme4647,19/06/2019:17:20:49 -0400,GET /embrace/supply-chains/dynamic/vertical,201,20574" + } + } + output: { + log: { + remote_addr: "5.86.210.12" + user_id: "zieme4647" + timestamp: "19/06/2019:17:20:49 -0400" + message: "GET /embrace/supply-chains/dynamic/vertical" + status: 201 + bytes: 20574 + } + } + }] + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + metric_to_log: { + kind: "transform" + output: {} + title: "Metric to Log" + description: """ + Converts a metric event into a log event, which can be useful for sending metrics + to log-supporting downstream components. + """ + classes: { + commonly_used: true + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + convert: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + host_tag: { + common: true + description: "Tag key that identifies the source host." + name: "host_tag" + required: false + warnings: [] + type: { + string: { + default: "hostname" + examples: ["host", "hostname"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/New_York", "EST5EDT"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + metric_to_log: "The type of this component."
+ } + syntax: "literal" + } + } + } + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "metric_to_log" + #ExampleConfig: { + title: string + configuration: { + host_tag: null + timezone: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Metric To Log" + configuration: { + host_tag: "host" + timezone: null + type: null + inputs: null + } + input: { + metric: { + kind: "absolute" + name: "histogram" + timestamp: "2020-08-01T21:15:47+00:00" + tags: { + host: "my.host.com" + code: "200" + } + histogram: { + buckets: [{ + upper_limit: 1.0 + count: 10 + }, { + upper_limit: 2.0 + count: 20 + }] + count: 30 + sum: 50.0 + } + } + } + output: { + log: { + name: "histogram" + timestamp: "2020-08-01T21:15:47+00:00" + host: "my.host.com" + tags: { + code: "200" + } + kind: "absolute" + histogram: { + buckets: [{ + count: 10 + upper_limit: 1.0 + }, { + count: 20 + upper_limit: 2.0 + }] + count: 30 + sum: 50.0 + } + } + } + }] + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + concat: { + kind: "transform" + output: {} + title: "Concat" + description: "Slices log string fields and joins them into a single field." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = "The severity level is " + .level + ``` + """] + notices: [] + } + configuration: { + items: { + description: "A list of substring definitons in the format of source_field[start..end]. For both start and end negative values are counted from the end of the string." + name: "items" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["first[..3]", "second[-5..]", "third[3..6]"] + syntax: "literal" + } + } + } + } + } + } + joiner: { + common: false + description: "The string that is used to join all items." + name: "joiner" + required: false + warnings: [] + type: { + string: { + default: " " + examples: [" ", ",", "_", "+"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + target: { + description: "The name for the new label." + name: "target" + required: true + warnings: [] + type: { + string: { + examples: ["root_field_name", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + concat: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "concat" + #ExampleConfig: { + title: string + configuration: { + items: null + joiner: null + target: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Date" + configuration: { + items: ["month", "day", "year"] + target: "date" + type: null + joiner: "/" + inputs: null + } + input: { + log: { + message: "Hello world" + month: "12" + day: "25" + year: "2020" + } + } + output: { + log: { + message: "Hello world" + date: "12/25/2020" + month: "12" + day: "25" + year: "2020" + } + } + }] + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." 
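+ // NOTE: the "Date" example above joins whole fields; the [start..end] form
+ // documented for `items` also slices substrings (negative indices count from
+ // the end of the string). And since `concat` is deprecated in favor of
+ // `remap`, the same result can be produced in VRL, carried as a string in the
+ // config. Two minimal sketches (hypothetical component names; `join` is
+ // fallible in VRL, hence the `!`):
+ //
+ //     transforms: shorten: {
+ //         type:   "concat"
+ //         inputs: ["my_source"]
+ //         target: "summary"
+ //         joiner: "-"
+ //         items: ["month[..3]", "year[-2..]"]   // first 3 chars of .month, last 2 of .year
+ //     }
+ //
+ //     transforms: make_date: {
+ //         type:   "remap"
+ //         inputs: ["my_source"]
+ //         source: #".date = join!([.month, .day, .year], "/")"#
+ //     }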
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + regex_parser: { + kind: "transform" + output: {} + title: "Regex Parser" + description: "Parses a log field's value with a [Regular Expression](https://en.wikipedia.org/wiki/Regular_expression)." 
+ classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + parse: { + format: { + name: "regular expressions" + url: "https://en.wikipedia.org/wiki/Regular_expression" + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = parse_regex(.message, r'(?P.*?) group') + ``` + """] + notices: [] + } + configuration: { + drop_failed: { + common: true + description: "If the event should be dropped if parsing fails." + name: "drop_failed" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + drop_field: { + common: true + description: "If the specified `field` should be dropped (removed) after parsing." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + field: { + common: true + description: "The log field to parse." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child"] + syntax: "literal" + } + } + } + overwrite_target: { + common: false + description: "If `target_field` is set and the log contains a field of the same name as the target, it will only be overwritten if this is set to `true`." + name: "overwrite_target" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + patterns: { + description: "The Regular Expressions to apply. Do not include the leading or trailing `/` in any of the expressions." + name: "patterns" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["^(?P<timestamp>[\\\\w\\\\-:\\\\+]+) (?P<level>\\\\w+) (?P<msg>.*)$"] + syntax: "literal" + } + } + } + } + } + } + target_field: { + common: false + description: "If this setting is present, the parsed fields will be inserted into the log as a sub-object with this name. If a field with the same name already exists, the parser will fail and produce an error." + name: "target_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["root_field", "parent.child"] + syntax: "literal" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time."
+ name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/NewYork", "EST5EDT"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + types: { + common: true + category: "Types" + description: """ + Key/value pairs representing mapped log field names and types. This is used to + coerce log fields from strings into their proper types. The available types are + listed in the **Types** list below. + + Timestamp coercions need to be prefaced with `timestamp|`, for example + `"timestamp|%F"`. Timestamp specifiers can use either of the following: + + 1. One of the built-in-formats listed in the **Timestamp Formats** table below. + 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's + `chrono` library. + + ### Types + + * `array` + * `bool` + * `bytes` + * `float` + * `int` + * `map` + * `null` + * `timestamp` (see the table below for formats) + + ### Timestamp Formats + + Format | Description | Example + :------|:------------|:------- + `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54` + `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54` + `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)\\[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54` + `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54` + `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020` + `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020` + `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274` + `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z` + `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC with time zone | `2020-12-01T02:37:54-07:00` + `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020` + `%a %d %b %T %z %Y`| [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020` + `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020` + + **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time. 
+ """ + name: "types" + required: false + warnings: [] + type: { + object: { + examples: [{ + status: "int" + duration: "float" + success: "bool" + timestamp_iso8601: "timestamp|%F" + timestamp_custom: "timestamp|%a %b %e %T %Y" + timestamp_unix: "timestamp|%F %T" + parent: { + child: "int" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + regex_parser: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "regex_parser" + #ExampleConfig: { + title: string + configuration: { + drop_failed: null + drop_field: null + field: null + overwrite_target: null + patterns: null + target_field: null + timezone: null + types: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Syslog 5424" + configuration: { + field: "message" + patterns: [#"^(?P[\w\.]+) - (?P[\w]+) (?P[\d]+) \[(?P.*)\] "(?P[\w]+) (?P.*)" (?P[\d]+) (?P[\d]+)$"#] + drop_failed: null + drop_field: null + overwrite_target: null + target_field: null + timezone: null + type: null + types: { + bytes_in: "int" + timestamp: "timestamp|%d/%m/%Y:%H:%M:%S %z" + status: "int" + bytes_out: "int" + } + inputs: null + } + input: { + log: { + message: "5.86.210.12 - zieme4647 5667 [19/06/2019:17:20:49 -0400] \"GET /embrace/supply-chains/dynamic/vertical\" 201 20574" + } + } + output: { + log: { + bytes_in: 5667 + host: "5.86.210.12" + user_id: "zieme4647" + timestamp: "2019-06-19T17:20:49-0400" + method: "GET" + path: "/embrace/supply-chains/dynamic/vertical" + status: 201 + bytes_out: 20574 + } + } + }] + how_it_works: { + failed_parsing: { + #Subsection: { + title: string + body: string + } + name: "failed_parsing" + title: "Failed Parsing" + body: "By default, if the input message text does not match any of the configured regular expression patterns, this transform will log an error message but leave the log event unchanged. If you instead wish to have this transform drop the event, set `drop_failed = true`." + } + flags: { + #Subsection: { + title: string + body: string + } + name: "flags" + title: "Flags" + body: """ + Regex flags can be toggled with the `(?flags)` syntax. The available flags are: + + | Flag | Descriuption | + | :--- | :----------- | + | `i` | case-insensitive: letters match both upper and lower case | + | `m` | multi-line mode: ^ and $ match begin/end of line | + | `s` | allow . to match `\\n` | + | `U` | swap the meaning of `x*` and `x*?` | + | `u` | Unicode support (enabled by default) | + | `x` | ignore whitespace and allow line comments (starting with `#`) + + For example, to enable the case-insensitive flag you can write: + + ```text + (?i)Hello world + ``` + + More info can be found in the [Regex grouping and flags documentation](#(urls.regex_grouping_and_flags)). + """ + } + named_captures: { + #Subsection: { + title: string + body: string + } + name: "named_captures" + title: "Named Captures" + body: """ + You can name Regex captures with the `` syntax. For example: + + ```text + ^(?P\\w*) (?P\\w*) (?P.*)$ + ``` + + Will capture `timestamp`, `level`, and `message`. All values are extracted as + `string` values and must be coerced with the `types` table. 
+ + More info can be found in the [Regex grouping and flags + documentation](#(urls.regex_grouping_and_flags)). + """ + } + regex_debugger: { + #Subsection: { + title: string + body: string + } + name: "regex_debugger" + title: "Regex Debugger" + body: "If you are having difficulty with your regular expression not matching text, you may try debugging your patterns at [Regex 101][regex_tester]. This site includes a regular expression tester and debugger. The regular expression engine used by Vector is most similar to the \"Go\" implementation, so make sure that is selected in the \"Flavor\" menu." + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + regex_syntax: { + #Subsection: { + title: string + body: string + } + name: "regex_syntax" + title: "Regex Syntax" + body: "Vector uses the Rust standard regular expression engine for pattern matching. Its syntax shares most of the features of Perl-style regular expressions, with a few exceptions. You can find examples of patterns in the [Rust regex module documentation][rust_regex_syntax]." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
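+ // NOTE: a minimal sketch tying together the `patterns`, `types`, and
+ // named-capture behaviour documented above (hypothetical component and field
+ // names):
+ //
+ //     transforms: parse_app_log: {
+ //         type:   "regex_parser"
+ //         inputs: ["my_source"]
+ //         field:  "message"
+ //         patterns: [#"^(?P<timestamp>[\w\-:\+]+) (?P<level>\w+) (?P<msg>.*)$"#]
+ //         // captures are extracted as strings; coerce them explicitly
+ //         types: timestamp: "timestamp|%FT%T"
+ //     }
+ //
+ // With `drop_field` left at its default of `true`, `message` is removed once
+ // parsing succeeds; a failed parse logs an error and leaves the event
+ // unchanged unless `drop_failed: true` is set.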
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + route: { + kind: "transform" + output: {} + title: "Route" + description: """ + Splits a stream of events into multiple sub-streams based on a set of + conditions. + """ + classes: { + commonly_used: false + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + route: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msvc": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + route: { + category: "Route" + description: """ + A table of route identifiers to logical conditions representing the filter of the route. Each route + can then be referenced as an input by other components with the name `<transform_name>.<route_id>`. + """ + name: "route" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + "*": { + description: """ + The condition to be matched against every input event. Only messages that pass the + condition will be included in this route. + """ + name: "*" + required: true + warnings: [] + type: { + string: { + examples: [#".status_code != 200 && !includes(["info", "debug"], .severity)"#] + syntax: "remap_boolean_expression" + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + route: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "route" + #ExampleConfig: { + title: string + configuration: { + route: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Split by log level" + configuration: { + route: { + debug: ".level == \"debug\"" + info: ".level == \"info\"" + warn: ".level == \"warn\"" + error: ".level == \"error\"" + } + type: null + inputs: null + } + input: { + log: { + level: "info" + } + } + output: { + log: { + level: "info" + } + } + }] + telemetry: { + metrics: { + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics."
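+ // NOTE: the route table only defines the sub-streams; each one is consumed
+ // by naming `<transform_name>.<route_id>` in a downstream component's
+ // `inputs`. A minimal sketch (hypothetical names), following the "Split by
+ // log level" example above:
+ //
+ //     transforms: by_level: {
+ //         type:   "route"
+ //         inputs: ["my_source"]
+ //         route: error: #".level == "error""#
+ //     }
+ //     sinks: error_console: {
+ //         type:   "console"
+ //         inputs: ["by_level.error"]   // consume just the `error` route
+ //         encoding: codec: "json"
+ //     }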
+ required: true + default: "vector" + } + } + name: "events_discarded_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + merge: { + kind: "transform" + output: {} + title: "Merge" + description: "Merges partial log events into a single event." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: true + } + features: { + reduce: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This component has been deprecated in favor of the new + [`reduce` transform](https://vector.dev/docs/reference/transforms/remap/). The `reduce` + transform provides a simple syntax for robust data merging. + Let us know what you think! + """] + notices: [] + } + configuration: { + fields: { + common: true + description: """ + Fields to merge. + The values of these fields will be merged into the first partial event. + Fields not specified here will be ignored. + Merging process takes the first partial event and the base, then it merges in the fields from each successive partial event, until a non-partial event arrives. + Finally, the non-partial event fields are merged in, producing the resulting merged event. + """ + name: "fields" + required: false + warnings: [] + type: { + array: { + default: ["message"] + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "literal" + } + } + } + } + } + } + partial_event_marker_field: { + common: true + description: """ + The field that indicates that the event is partial. + A consequent stream of partial events along with the first non-partial event will be merged together. + """ + name: "partial_event_marker_field" + required: false + warnings: [] + type: { + string: { + default: "_partial" + examples: ["_partial", "parent.child"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + stream_discriminant_fields: { + common: true + description: """ + An ordered list of fields to distinguish streams by. + Each stream has a separate partial event merging state. + Should be used to prevent events from unrelated sources from mixing together, as this affects partial event processing. + """ + name: "stream_discriminant_fields" + required: false + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["host", "parent.child"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + merge: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "merge" + #ExampleConfig: { + title: string + configuration: { + fields: null + partial_event_marker_field: null + stream_discriminant_fields: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Default" + configuration: { + fields: null + partial_event_marker_field: null + stream_discriminant_fields: null + type: null + inputs: null + } + input: [{ + log: { + message: "First" + "_partial": true + custom_string_field: "value1" + custom_int_field: 1 + } + }, { + log: { + message: "Second" + "_partial": true + custom_string_field: "value2" + custom_int_field: 2 + } + }, { + log: { + message: "Third" + custom_string_field: "value3" + custom_int_field: 3 + } + }] + output: { + log: { + message: "FirstSecondThird" + custom_string_field: "value1" + custom_int_field: 1 + } + } + notes: """ + Notice that `custom_string_field` and `custom_int_field` were not overridden. + This is because they were not listed in the `fields` option. + """ + }, { + title: "With Merge Fields" + configuration: { + fields: ["message", "custom_string_field", "custom_int_field"] + partial_event_marker_field: null + stream_discriminant_fields: null + type: null + inputs: null + } + input: [{ + log: { + message: "First" + "_partial": true + custom_string_field: "value1" + custom_int_field: 1 + } + }, { + log: { + message: "Second" + "_partial": true + custom_string_field: "value2" + custom_int_field: 2 + } + }, { + log: { + message: "Third" + custom_string_field: "value3" + custom_int_field: 3 + } + }] + output: { + log: { + message: "FirstSecondThird" + custom_string_field: "value1value2value3" + custom_int_field: 3 + } + } + notes: """ + Notice that `custom_string_field` is concatenated and `custom_int_field` + overridden. This is because it was specified in the `fields` option. + """ + }] + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). + State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + json_parser: { + kind: "transform" + output: {} + title: "JSON Parser" + description: "Parses a log field value as [JSON](https://en.wikipedia.org/wiki/JSON)." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + parse: { + format: { + name: "JSON" + url: "https://en.wikipedia.org/wiki/JSON" + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = parse_json(.message) + ``` + """] + notices: [] + } + configuration: { + drop_field: { + common: true + description: "If the specified `field` should be dropped (removed) after parsing. If parsing fails, the field will not be removed, irrespective of this setting." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + drop_invalid: { + description: "If `true` events with invalid JSON will be dropped, otherwise the event will be kept and passed through." + name: "drop_invalid" + required: true + warnings: [] + type: { + bool: {} + } + } + field: { + common: true + description: "The log field to decode as JSON. Must be a `string` value type." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + overwrite_target: { + common: false + description: "If `target_field` is set and the log contains a field of the same name as the target, it will only be overwritten if this is set to `true`." + name: "overwrite_target" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + target_field: { + common: false + description: "If this setting is present, the parsed JSON will be inserted into the log as a sub-object with this name. If a field with the same name already exists, the parser will fail and produce an error." + name: "target_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["root_field", "parent.child"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + json_parser: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "json_parser" + #ExampleConfig: { + title: string + configuration: { + drop_field: null + drop_invalid: null + field: null + overwrite_target: null + target_field: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + invalid_json: { + #Subsection: { + title: string + body: string + } + name: "invalid_json" + title: "Invalid JSON" + body: """ + If the value for the specified `field` is not valid JSON you can control keeping + or discarding the event with the `drop_invalid` option. Setting it to `true` will + discard the event and drop it entirely. Setting it to `false` will keep the + event and pass it through. Note that passing through the event could cause + problems and violate assumptions about the structure of your event. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + merge_conflicts: { + #Subsection: { + title: string + body: string + } + name: "merge_conflicts" + title: "Merge Conflicts" + body: "" + sub_sections: [{ + title: "Key Conflicts" + body: "Any key present in the decoded JSON will override existing keys in the event." + }, { + title: "Object Conflicts" + body: """ + If the decoded JSON includes nested fields it will be _deep_ merged into the + event. For example, given the following event: + + ```javascript + { + "message": "{"parent": {"child2": "value2"}}", + "parent": { + "child1": "value1" + } + } + ``` + + Parsing the `"message"` field would result the following structure: + + ```javascript + { + "parent": { + "child1": "value1", + "child2": "value2" + } + } + ``` + + Notice that the `parent.child1` key was preserved. + """ + }] + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ processed_bytes_total: {
+ description: "The total number of bytes processed by the component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processed_bytes_total"
+ }
+ }
+ }
+ }
+ wasm: {
+ kind: "transform"
+ output: {}
+ title: "WASM"
+ description: """
+ Process events using the [WASM](https://webassembly.org/) virtual machine, allowing
+ you to process Vector events with TypeScript, Ruby, Java, and [more](https://github.com/appcypher/awesome-wasm-langs).
+ """
+ classes: {
+ commonly_used: false
+ development: "beta"
+ egress_method: "stream"
+ stateful: true
+ }
+ features: {
+ program: {
+ runtime: {
+ name: "WASM"
+ url: "https://webassembly.org/"
+ version: null
+ }
+ }
+ descriptions: {}
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": false
+ "aarch64-unknown-linux-musl": false
+ "armv7-unknown-linux-gnueabihf": false
+ "armv7-unknown-linux-musleabihf": false
+ "x86_64-apple-darwin": false
+ "x86_64-pc-windows-msv": false
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": false
+ }
+ requirements: [#"""
+ Vector must be built with the `wasm` feature. *This is not enabled by default.
+ Review [Building Vector][urls.contributing]*.
+ """#]
+ warnings: []
+ notices: ["""
+ Please consider the [`remap` transform](https://vector.dev/docs/reference/transforms/remap/) before using this transform. The
+ [Vector Remap Language](https://vector.dev/docs/reference/vrl/) is designed for safe, performant, and easy data mapping. It
+ is intended to cover the vast majority of data mapping use cases, leaving WASM for very advanced and
+ edge-case situations.
+ """]
+ }
+ configuration: {
+ artifact_cache: {
+ description: "The directory where Vector should store the artifact it builds from this WASM module. Typically, all WASM modules share this."
+ name: "artifact_cache" + required: true + warnings: [] + type: { + string: { + examples: ["/etc/vector/artifacts", "/var/lib/vector/artifacts", "C:\\vector\\artifacts"] + syntax: "file_system_path" + } + } + } + heap_max_size: { + common: false + description: "The maximum size of the heap of this module, in bytes. (This includes the module itself, default is 10 MB.)" + name: "heap_max_size" + required: false + warnings: [] + type: { + uint: { + default: 10485760 + unit: "bytes" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + module: { + description: "The file path of the `.wasm` or `.wat` module." + name: "module" + required: true + warnings: [] + type: { + string: { + examples: ["./modules/example.wasm", "/example.wat", "example.wasm"] + syntax: "file_system_path" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + wasm: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "wasm" + #ExampleConfig: { + title: string + configuration: { + artifact_cache: null + heap_max_size: null + module: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). + State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + geoip: { + kind: "transform" + title: "GeoIP" + description: """ + Enrich events with geolocation data from the MaxMind GeoIP2-City, + GeoLite2-City, GeoIP2-ISP and GeoLite2-ASN databases. 
+ """ + classes: { + commonly_used: false + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + enrich: { + from: { + service: { + name: "MaxMind GeoIP2 and GeoLite2 city databases" + url: "https://www.maxmind.com/en/geoip2-isp-database" + versions: ">= 2" + } + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + database: { + description: """ + Path to the [MaxMind GeoIP2](https://dev.maxmind.com/geoip/geoip2/downloadable) or [GeoLite2 binary city + database](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) file (`GeoLite2-City.mmdb`). Other + databases, such as the the country database, are not supported. + """ + name: "database" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/GeoLite2-City.mmdb", "/path/to/GeoLite2-ISP.mmdb"] + syntax: "literal" + } + } + } + source: { + description: "The field name that contains the IP address. This field should contain a valid IPv4 or IPv6 address." + name: "source" + required: true + warnings: [] + type: { + string: { + examples: ["ip_address", "x-forwarded-for", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + target: { + common: true + description: "The default field to insert the resulting GeoIP data into. See [output](#output) for more info." + name: "target" + required: false + warnings: [] + type: { + string: { + default: "geoip" + examples: ["geoip", "parent.child"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + geoip: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "geoip" + #ExampleConfig: { + title: string + configuration: { + database: null + source: null + target: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + supported_databases: { + #Subsection: { + title: string + body: string + } + name: "supported_databases" + title: "Supported MaxMind databases" + body: """ + The `geoip` transform currently supports the following [MaxMind](https://www.maxmind.com/en/home) + databases: + + * [GeoLite2-ASN.mmdb](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) (free) — Determine the + \tautonomous system number and organization associated with an IP address. 
+ * [GeoLite2-City.mmdb](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) (free) — Determine the
+ \tcountry, subdivisions, city, and postal code associated with IPv4 and IPv6
+ \taddresses worldwide.
+ * [GeoIP2-City.mmdb](https://www.maxmind.com/en/geoip2-city) (paid) — Determine the country,
+ \tsubdivisions, city, and postal code associated with IPv4 and IPv6
+ \taddresses worldwide.
+ * [GeoIP2-ISP.mmdb](https://www.maxmind.com/en/geoip2-isp-database) (paid) — Determine the Internet
+ \tService Provider (ISP), organization name, and autonomous system organization
+ \tand number associated with an IP address.
+
+ The database files should be in the [MaxMind DB file
+ format](https://maxmind.github.io/MaxMind-DB/).
+ """
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ }
+ output: {
+ logs: {
+ line: {
+ description: "Geo-enriched log event"
+ name: "line"
+ fields: {
+ geoip: {
+ category: "Geoip"
+ description: """
+ The root field containing all geolocation data as subfields. Depending on the
+ database used, either the city or the ISP field is populated.
+ """
+ name: "geoip"
+ required: true
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ autonomous_system_number: {
+ description: """
+ The Autonomous System (AS) number associated with the IP address.
+ Zero if unknown. Available with the
+ [GeoIP2-ISP](https://www.maxmind.com/en/geoip2-isp-database) or
+ [GeoLite2-ASN](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.
+ """
+ required: false
+ common: false
+ type: {
+ uint: {
+ unit: null
+ default: null
+ examples: [701, 721]
+ }
+ }
+ groups: ["ASN", "ISP"]
+ name: "autonomous_system_number"
+ warnings: []
+ }
+ autonomous_system_organization: {
+ description: """
+ The organization associated with the registered autonomous system number
+ for the IP address. Available with the
+ [GeoIP2-ISP](https://www.maxmind.com/en/geoip2-isp-database) or
+ [GeoLite2-ASN](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.
+ """
+ required: false
+ common: false
+ type: {
+ string: {
+ default: null
+ examples: ["MCI Communications Services, Inc. d/b/a Verizon Business", "DoD Network Information Center"]
+ syntax: "literal"
+ }
+ }
+ groups: ["ASN", "ISP"]
+ name: "autonomous_system_organization"
+ warnings: []
+ }
+ city_name: {
+ description: """
+ The city name associated with the IP address. Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or
+ [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.
+ """
+ required: true
+ type: {
+ string: {
+ examples: ["New York", "Brooklyn", "Chicago"]
+ syntax: "literal"
+ }
+ }
+ groups: ["City"]
+ name: "city_name"
+ warnings: []
+ }
+ continent_code: {
+ description: """
+ The continent code associated with the IP address.
+ Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or
+ [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.
+ """ + required: true + type: { + string: { + enum: { + AF: "Africa" + AN: "Antarctica" + AS: "Asia" + EU: "Europe" + NA: "North America" + OC: "Oceania" + SA: "South America" + } + examples: ["AF", "AN", "AS", "EU", "NA", "OC", "SA"] + syntax: "literal" + } + } + groups: ["City"] + name: "continent_code" + warnings: [] + } + country_code: { + description: """ + The [ISO 3166-2 country codes](https://en.wikipedia.org/wiki/ISO_3166-2) associated with + the IP address. Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or + [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.. + """ + required: true + type: { + string: { + examples: ["US", "US-PR", "FR", "FR-BL", "GB", "A1", "A2"] + syntax: "literal" + } + } + groups: ["City"] + name: "country_code" + warnings: [] + } + isp: { + description: """ + The name of the Internet Service Provider (ISP) associated with the + IP address. Available with the + [GeoIP2-ISP](https://www.maxmind.com/en/geoip2-isp-database) database. + """ + required: false + common: false + type: { + string: { + default: null + examples: ["Verizon Business"] + syntax: "literal" + } + } + groups: ["ISP"] + name: "isp" + warnings: [] + } + latitude: { + description: """ + The latitude associated with the IP address. Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or + [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.. + """ + required: true + type: { + string: { + examples: ["51.75"] + syntax: "literal" + } + } + groups: ["City"] + name: "latitude" + warnings: [] + } + longitude: { + description: """ + The longitude associated with the IP address. Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or + [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.. + """ + required: true + type: { + string: { + examples: ["-1.25"] + syntax: "literal" + } + } + groups: ["City"] + name: "longitude" + warnings: [] + } + organization: { + description: """ + The name of the organization associated with the IP address. + Available with the [GeoIP2-ISP](https://www.maxmind.com/en/geoip2-isp-database) + database. + """ + required: false + common: false + type: { + string: { + default: null + examples: ["Verizon Business"] + syntax: "literal" + } + } + groups: ["ISP"] + name: "organization" + warnings: [] + } + postal_code: { + description: """ + The postal code associated with the IP address. Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or + [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.. + """ + required: true + type: { + string: { + examples: ["07094", "10010", "OX1"] + syntax: "literal" + } + } + groups: ["City"] + name: "postal_code" + warnings: [] + } + timezone: { + description: """ + The timezone associated with the IP address in [IANA time zone + format](https://en.wikipedia.org/wiki/Tz_database#Names_of_time_zones). A full list of time zones + can be found [here](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) Available with the [GeoIP2-City](https://www.maxmind.com/en/geoip2-city) or + [GeoLite2-City](https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access) database.. 
+ """ + required: true + type: { + string: { + examples: ["America/New_York", "Asia/Atyrau", "Europe/London"] + syntax: "literal" + } + } + groups: ["City"] + name: "timezone" + warnings: [] + } + } + } + } + } + } + } + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + remove_fields: { + kind: "transform" + output: {} + title: "Remove Fields" + description: "Removes one or more log fields." 
+ classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + del(.level) + ``` + """] + notices: [] + } + configuration: { + drop_empty: { + common: false + description: "If set to `true`, after removing fields, remove any parent objects that are now empty." + name: "drop_empty" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + fields: { + description: "The log field names to drop." + name: "fields" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["field1", "field2", "parent.child"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + remove_fields: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "remove_fields" + #ExampleConfig: { + title: string + configuration: { + drop_empty: null + fields: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
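+ // Illustrative sketch only: a worked `examples` entry for `remove_fields`
+ // showing `drop_empty` pruning a now-empty parent object; the field names and
+ // values are assumptions.
+ //
+ //   examples: [{
+ //       title: "Remove a nested field and prune its empty parent"
+ //       configuration: {
+ //           fields:     ["parent.child"]
+ //           drop_empty: true
+ //       }
+ //       input: {log: {message: "hello world", parent: {child: "value1"}}}
+ //       output: {log: {message: "hello world"}}
+ //   }]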
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + add_tags: { + kind: "transform" + output: {} + title: "Add Tags" + description: "Adds tags to metric events." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + #".tag = "value""# + ``` + """] + notices: [] + } + configuration: { + overwrite: { + common: true + description: "By default, fields will be overridden. Set this to `false` to avoid overwriting values." + name: "overwrite" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + tags: { + common: true + category: "Tags" + description: "A table of key/value pairs representing the tags to be added to the metric." + name: "tags" + required: false + warnings: [] + type: { + object: { + examples: [{ + static_tag: "my value" + env_tag: "${ENV_VAR}" + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + add_tags: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "add_tags" + #ExampleConfig: { + title: string + configuration: { + overwrite: null + tags: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
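+ // Illustrative sketch only: a worked `examples` entry for `add_tags`, reusing
+ // the documented `static_tag` example value; the metric shape follows the
+ // `tag_cardinality_limit` examples below.
+ //
+ //   examples: [{
+ //       title: "Add a static tag to a counter"
+ //       configuration: {
+ //           tags: {static_tag: "my value"}
+ //       }
+ //       input: {metric: {kind: "incremental", name: "logins", counter: {value: 1.0}, tags: {}}}
+ //       output: {metric: {kind: "incremental", name: "logins", counter: {value: 1.0}, tags: {static_tag: "my value"}}}
+ //   }]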
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processed_bytes_total"
+ }
+ }
+ }
+ }
+ tag_cardinality_limit: {
+ kind: "transform"
+ output: {}
+ title: "Tag Cardinality Limit"
+ description: """
+ Limits the cardinality of tags on metric events, protecting against
+ accidental high cardinality usage that can commonly disrupt the stability
+ of metrics storage.
+ """
+ classes: {
+ commonly_used: false
+ development: "beta"
+ egress_method: "stream"
+ stateful: true
+ }
+ features: {
+ filter: {}
+ descriptions: {}
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: []
+ notices: []
+ }
+ configuration: {
+ cache_size_per_tag: {
+ common: false
+ description: "The size of the cache in bytes to use to detect duplicate tags. The bigger the cache, the less likely it is to have a 'false positive', or a case where we allow a new value for a tag even after we have reached the configured limits."
+ name: "cache_size_per_tag"
+ relevant_when: "mode = \"probabilistic\""
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 5120000
+ unit: "bytes"
+ }
+ }
+ }
+ limit_exceeded_action: {
+ common: true
+ description: "Controls what should happen when a metric comes in with a tag that would exceed the configured limit on cardinality."
+ name: "limit_exceeded_action"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "drop_tag"
+ enum: {
+ drop_tag: "Remove tags that would exceed the configured limit from the incoming metric"
+ drop_event: "Drop any metric events that contain tags that would exceed the configured limit"
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ mode: {
+ description: "Controls what approach is used internally to keep track of previously seen tags and determine when a tag on an incoming metric exceeds the limit."
+ name: "mode"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ enum: {
+ exact: "Has higher memory requirements than `probabilistic`, but never falsely outputs metrics with new tags after the limit has been hit."
+ probabilistic: "Has lower memory requirements than `exact`, but may occasionally allow metric events to pass through the transform even when they contain new tags that exceed the configured limit. The rate at which this happens can be controlled by changing the value of `cache_size_per_tag`."
+ }
+ examples: ["exact", "probabilistic"]
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + value_limit: { + common: true + description: "How many distinct values to accept for any given key." + name: "value_limit" + required: false + warnings: [] + type: { + uint: { + default: 500 + unit: null + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + tag_cardinality_limit: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "tag_cardinality_limit" + #ExampleConfig: { + title: string + configuration: { + cache_size_per_tag: null + limit_exceeded_action: null + mode: null + value_limit: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Drop high-cardinality tag" + context: """ + In this example we'll demonstrate how to drop a + high-cardinality tag named `user_id`. Notice that the + second metric's `user_id` tag has been removed. That's + because it exceeded the `value_limit`. + """ + configuration: { + fields: { + value_limit: 1 + limit_exceeded_action: "drop_tag" + } + cache_size_per_tag: null + limit_exceeded_action: null + mode: null + value_limit: null + type: null + inputs: null + } + input: [{ + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: { + user_id: "user_id_1" + } + } + }, { + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: { + user_id: "user_id_2" + } + } + }] + output: [{ + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: { + user_id: "user_id_1" + } + } + }, { + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: {} + } + }] + }] + how_it_works: { + intended_usage: { + #Subsection: { + title: string + body: string + } + name: "intended_usage" + title: "Intended Usage" + body: """ + This transform is intended to be used as a protection mechanism to prevent + upstream mistakes. Such as a developer accidentally adding a `request_id` + tag. When this is happens, it is recommended to fix the upstream error as soon + as possible. This is because Vector's cardinality cache is held in memory and it + will be erased when Vector is restarted. This will cause new tag values to pass + through until the cardinality limit is reached again. For normal usage this + should not be a common problem since Vector processes are normally long-lived. + """ + } + memory_utilization: { + #Subsection: { + title: string + body: string + } + name: "memory_utilization" + title: "Failed Parsing" + body: """ + This transform stores in memory a copy of the key for every tag on every metric + event seen by this transform. In mode `exact`, a copy of every distinct + value *for each key* is also kept in memory, until `value_limit` distinct values + have been seen for a given key, at which point new values for that key will be + rejected. 
+ you can use the following formula:
+
+ ```text
+ (number of distinct field names in the tags for your metrics * average length of
+ the field names for the tags) + (number of distinct field names in the tags of
+ your metrics * `value_limit` * average length of the values of tags for your
+ metrics)
+ ```
+
+ In mode `probabilistic`, rather than storing all values seen for each key, each
+ distinct key has a bloom filter which can probabilistically determine whether
+ a given value has been seen for that key. The formula for estimating memory
+ usage in mode `probabilistic` is:
+
+ ```text
+ (number of distinct field names in the tags for your metrics * average length of
+ the field names for the tags) + (number of distinct field names in the tags of
+ your metrics * `cache_size_per_tag`)
+ ```
+
+ The `cache_size_per_tag` option controls the size of the bloom filter used
+ for storing the set of acceptable values for any single key. The larger the
+ bloom filter, the lower the false positive rate, which in our case means the less
+ likely we are to allow a new tag value that would otherwise violate a
+ configured limit. If you want to know the exact false positive rate for a given
+ `cache_size_per_tag` and `value_limit`, there are many free online bloom filter
+ calculators that can answer this. The formula is generally presented in terms of
+ 'n', 'p', 'k', and 'm' where 'n' is the number of items in the filter
+ (`value_limit` in our case), 'p' is the probability of false positives (what we
+ want to solve for), 'k' is the number of hash functions used internally, and 'm'
+ is the number of bits in the bloom filter. You should be able to provide values
+ for just 'n' and 'm' and get back the value for 'p' with an optimal 'k' selected
+ for you. Remember when converting from `cache_size_per_tag` to the 'm' value to plug
+ into the calculator that `cache_size_per_tag` is in bytes, and 'm' is often presented
+ in bits (1/8 of a byte).
+ """
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: """
+ This component is stateful, meaning its behavior changes based on previous inputs (events).
+ State is not preserved across restarts, therefore state-dependent behavior will reset between
+ restarts and depend on the inputs (events) received since the most recent restart.
+ """
+ }
+ restarts: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "restarts"
+ title: "Restarts"
+ body: """
+ This transform's cache is held in memory, and therefore, restarting Vector
+ will reset the cache. This means that new values will be passed through until
+ the cardinality limit is reached again. See [intended usage](#intended-usage)
+ for more info.
+ """
+ }
+ }
+ telemetry: {
+ metrics: {
+ tag_value_limit_exceeded_total: {
+ description: """
+ The total number of events discarded because the tag has been rejected after
+ hitting the configured `value_limit`.
+ """
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
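+ // For the bloom filter sizing discussion above, the standard false-positive
+ // approximation ties 'n', 'p', 'k', and 'm' together; with `cache_size_per_tag`
+ // bytes per filter, m = 8 * `cache_size_per_tag`:
+ //
+ //   p ≈ (1 - e^(-k * n / m))^k, minimized at k = (m / n) * ln(2)
+ //
+ // For the defaults (`cache_size_per_tag` = 5120000 bytes, so m = 40960000 bits;
+ // `value_limit` = 500, so n = 500), m/n is 81920 bits per stored value and p is
+ // negligible for any reasonable k.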
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "tag_value_limit_exceeded_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + value_limit_reached_total: { + description: """ + The total number of times new values for a key have been rejected because the + value limit has been reached. + """ + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "value_limit_reached_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + reduce: { + kind: "transform" + output: {} + title: "Reduce" + description: """ + Reduces multiple log events into a single log event based on a set of + conditions and merge strategies. + """ + classes: { + commonly_used: false + development: "beta" + egress_method: "stream" + stateful: true + } + features: { + reduce: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + ends_when: { + common: false + description: """ + A condition used to distinguish the final event of a transaction. If this condition resolves to `true` + for an event, the current transaction is immediately flushed with this event. + """ + name: "ends_when" + required: false + warnings: [] + type: { + string: { + default: null + examples: [#".status_code != 200 && !includes(["info", "debug"], .severity)"#] + syntax: "literal" + } + } + } + expire_after_ms: { + common: false + description: "A maximum period of time to wait after the last event is received before a combined event should be considered complete." 
+ name: "expire_after_ms" + required: false + warnings: [] + type: { + uint: { + default: 30000 + unit: "milliseconds" + } + } + } + flush_period_ms: { + common: false + description: "Controls the frequency that Vector checks for (and flushes) expired events." + name: "flush_period_ms" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: "milliseconds" + } + } + } + group_by: { + common: true + description: "An ordered list of fields by which to group events. Each group is combined independently, allowing you to keep independent events separate. When no fields are specified, all events will be combined in a single group. Events missing a specified field will be combined in their own group." + name: "group_by" + required: false + warnings: [] + type: { + array: { + default: [] + items: { + type: { + string: { + examples: ["request_id", "user_id", "transaction_id"] + syntax: "literal" + } + } + } + } + } + } + merge_strategies: { + common: false + category: "Merge_strategies" + description: """ + A map of field names to custom merge strategies. For each + field specified this strategy will be used for combining + events rather than the default behavior. + + The default behavior is as follows: + + 1. The first value of a string field is kept, subsequent + values are discarded. + 2. For timestamp fields the first is kept and a new field + `[field-name]_end` is added with the last received + timestamp value. + 3. Numeric values are summed. + """ + name: "merge_strategies" + required: false + warnings: [] + type: { + object: { + examples: [{ + method: "discard" + path: "discard" + duration_ms: "sum" + query: "array" + }] + options: { + "*": { + description: "The custom merge strategy to use for a field." + name: "*" + required: true + warnings: [] + type: { + string: { + enum: { + array: "Each value is appended to an array." + concat: "Concatenate each string value (delimited with a space)." + concat_newline: "Concatenate each string value (delimited with a newline)." + discard: "Discard all but the first value found." + sum: "Sum all numeric values." + max: "The maximum of all numeric values." + min: "The minimum of all numeric values." + } + examples: ["array", "concat", "concat_newline", "discard", "sum", "max", "min"] + syntax: "literal" + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + starts_when: { + common: false + description: """ + A condition used to distinguish the first event of a transaction. If this condition resolves to `true` + for an event, the previous transaction is flushed (without this event) and a new transaction is started. + """ + name: "starts_when" + required: false + warnings: [] + type: { + string: { + default: null + examples: [#".status_code != 200 && !includes(["info", "debug"], .severity)"#] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." 
+ name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + reduce: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "reduce" + #ExampleConfig: { + title: string + configuration: { + ends_when: null + expire_after_ms: null + flush_period_ms: null + group_by: null + merge_strategies: null + starts_when: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Merge Ruby exceptions" + input: [{ + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: "foobar.rb:6:in `/': divided by 0 (ZeroDivisionError)" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }, { + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: " from foobar.rb:6:in `bar'" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }, { + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: " from foobar.rb:2:in `foo'" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }, { + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: " from foobar.rb:9:in `
'" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }, { + log: { + timestamp: "2020-10-07T12:33:22.123528Z" + message: "Hello world, I am a new log" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }] + configuration: { + group_by: ["host", "pid", "tid"] + marge_strategies: { + message: "concat_newline" + } + ends_when: null + expire_after_ms: null + flush_period_ms: null + merge_strategies: null + type: null + starts_when: "match(.message, /^[^\\s]/)" + inputs: null + } + output: [{ + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: """ + foobar.rb:6:in `/': divided by 0 (ZeroDivisionError) + from foobar.rb:6:in `bar' + from foobar.rb:2:in `foo' + from foobar.rb:9:in `
' + """ + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }, { + log: { + timestamp: "2020-10-07T12:33:22.123528Z" + message: "Hello world, I am a new log" + host: "host-1.hostname.com" + pid: 1234 + tid: 5678 + } + }] + }, { + title: "Reduce Rails logs into a single transaction" + configuration: { + ends_when: null + expire_after_ms: null + flush_period_ms: null + group_by: null + merge_strategies: null + starts_when: null + type: null + inputs: null + } + input: [{ + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + message: "Received GET /path" + request_id: "abcd1234" + request_path: "/path" + request_params: { + key: "val" + } + } + }, { + log: { + timestamp: "2020-10-07T12:33:21.832345Z" + message: "Executed query in 5.2ms" + request_id: "abcd1234" + query: "SELECT * FROM table" + query_duration_ms: 5.2 + } + }, { + log: { + timestamp: "2020-10-07T12:33:22.457423Z" + message: "Rendered partial _partial.erb in 2.3ms" + request_id: "abcd1234" + template: "_partial.erb" + render_duration_ms: 2.3 + } + }, { + log: { + timestamp: "2020-10-07T12:33:22.543323Z" + message: "Executed query in 7.8ms" + request_id: "abcd1234" + query: "SELECT * FROM table" + query_duration_ms: 7.8 + } + }, { + log: { + timestamp: "2020-10-07T12:33:22.742322Z" + message: "Sent 200 in 15.2ms" + request_id: "abcd1234" + response_status: 200 + response_duration_ms: 5.2 + } + }] + output: { + log: { + timestamp: "2020-10-07T12:33:21.223543Z" + timestamp_end: "2020-10-07T12:33:22.742322Z" + request_id: "abcd1234" + request_path: "/path" + request_params: { + key: "val" + } + query_duration_ms: 13.0 + render_duration_ms: 2.3 + status: 200 + response_duration_ms: 5.2 + } + } + }] + telemetry: { + metrics: { + stale_events_flushed_total: { + description: "The number of stale events that Vector has flushed." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "stale_events_flushed_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). + State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + } + } + log_to_metric: { + kind: "transform" + title: "Log to Metric" + description: "Derives one or more metric events from a log event." + classes: { + commonly_used: false + development: "stable" + egress_method: "batch" + stateful: false + } + features: { + convert: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + metrics: { + description: "A list of metrics to derive from the log event." + name: "metrics" + required: true + warnings: [] + type: { + array: { + items: { + type: { + object: { + examples: [] + options: { + field: { + description: "The log field to use as the metric." + name: "field" + required: true + warnings: [] + type: { + string: { + examples: ["duration", "parent.child"] + syntax: "literal" + } + } + } + increment_by_value: { + description: """ + If `true` the metric will be incremented by the `field` value. + If `false` the metric will be incremented by 1 regardless of the `field` value. + """ + name: "increment_by_value" + required: false + common: false + warnings: [] + relevant_when: "type = \"counter\"" + type: { + bool: { + default: false + } + } + } + name: { + description: "The name of the metric. Defaults to `<field>_total` for `counter` and `<field>` for `gauge`." + name: "name" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["duration_total"] + default: null + syntax: "template" + } + } + } + namespace: { + description: "The namespace of the metric." + name: "namespace" + required: false + common: true + warnings: [] + type: { + string: { + examples: ["service"] + default: null + syntax: "template" + } + } + } + tags: { + category: "Tags" + description: "Key/value pairs representing [metric tags][docs.data-model.metric#tags]." + name: "tags" + required: false + common: true + warnings: [] + type: { + object: { + examples: [{ + host: "${HOSTNAME}" + region: "us-east-1" + status: "{{status}}" + }] + options: { + "*": { + description: """ + Key/value pairs representing [metric tags][docs.data-model.metric#tags]. + Environment variables and field interpolation are allowed. + """ + name: "*" + required: true + warnings: [] + type: { + "*": {} + } + } + } + } + } + } + type: { + description: "The metric type." + name: "type" + required: true + warnings: [] + type: { + string: { + enum: { + counter: "A [counter metric type][docs.data-model.metric#counter]." + gauge: "A [gauge metric type][docs.data-model.metric#gauge]." + histogram: "A [distribution metric type][docs.data-model.metric#distribution] with histogram statistic." + set: "A [set metric type][docs.data-model.metric#set]."
+ summary: "A [distribution metric type][docs.data-model.metric#distribution] with summary statistic." + } + examples: ["counter", "gauge", "histogram", "set", "summary"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + log_to_metric: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + output: { + metrics: { + counter: { + description: """ + A single value that can only be incremented + or reset to zero value, it cannot be + decremented. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "counter" + type: "counter" + default_namespace: "vector" + } + distribution: { + description: """ + A distribution represents a distribution of + sampled values. It is used with services + that support global histograms and summaries. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "distribution" + type: "distribution" + default_namespace: "vector" + } + gauge: { + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "gauge" + type: "gauge" + default_namespace: "vector" + } + set: { + description: "A set represents an array of unique values." + tags: { + "*": { + name: "*" + description: "Any tags present on the metric." + examples: ["my-host.local"] + required: false + } + } + name: "set" + type: "gauge" + default_namespace: "vector" + } + } + } + env_vars: {} + type: "log_to_metric" + #ExampleConfig: { + title: string + configuration: { + metrics: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Counter" + notes: "This example demonstrates counting HTTP status codes." 
+ configuration: { + metrics: [{ + type: "counter" + field: "status" + name: "response_total" + namespace: "service" + tags: { + status: "{{status}}" + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "Sent 200 in 54.2ms" + status: 200 + } + } + output: [{ + metric: { + kind: "incremental" + name: "response_total" + namespace: "service" + tags: { + status: "200" + host: "10.22.11.222" + } + counter: { + value: 1.0 + } + } + }] + }, { + title: "Sum" + notes: "In this example we'll demonstrate computing a sum: the total of orders placed." + configuration: { + metrics: [{ + type: "counter" + field: "total" + name: "order_total" + increment_by_value: true + tags: { + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "Order placed for $122.20" + total: 122.2 + } + } + output: [{ + metric: { + kind: "incremental" + name: "order_total" + tags: { + host: "10.22.11.222" + } + counter: { + value: 122.2 + } + } + }] + }, { + title: "Gauges" + notes: "In this example we'll demonstrate creating a gauge that represents the current CPU load averages." + configuration: { + metrics: [{ + type: "gauge" + field: "1m_load_avg" + tags: { + host: "{{host}}" + } + }, { + type: "gauge" + field: "5m_load_avg" + tags: { + host: "{{host}}" + } + }, { + type: "gauge" + field: "15m_load_avg" + tags: { + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "CPU activity sample" + "1m_load_avg": 78.2 + "5m_load_avg": 56.2 + "15m_load_avg": 48.7 + } + } + output: [{ + metric: { + kind: "absolute" + name: "1m_load_avg" + tags: { + host: "10.22.11.222" + } + gauge: { + value: 78.2 + } + } + }, { + metric: { + kind: "absolute" + name: "5m_load_avg" + tags: { + host: "10.22.11.222" + } + gauge: { + value: 56.2 + } + } + }, { + metric: { + kind: "absolute" + name: "15m_load_avg" + tags: { + host: "10.22.11.222" + } + gauge: { + value: 48.7 + } + } + }] + }, { + title: "Histogram distribution" + notes: "This example demonstrates capturing timings in your logs to compute a histogram." + configuration: { + metrics: [{ + type: "histogram" + field: "time" + name: "time_ms" + tags: { + status: "{{status}}" + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "Sent 200 in 54.2ms" + status: 200 + time: 54.2 + } + } + output: [{ + metric: { + kind: "incremental" + name: "time_ms" + tags: { + status: "200" + host: "10.22.11.222" + } + distribution: { + samples: [{ + value: 54.2 + rate: 1 + }] + statistic: "histogram" + } + } + }] + }, { + title: "Summary distribution" + notes: "This example demonstrates capturing timings in your logs to compute a summary." + configuration: { + metrics: [{ + type: "summary" + field: "time" + name: "time_ms" + tags: { + status: "{{status}}" + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "Sent 200 in 54.2ms" + status: 200 + time: 54.2 + } + } + output: [{ + metric: { + kind: "incremental" + name: "time_ms" + tags: { + status: "200" + host: "10.22.11.222" + } + distribution: { + samples: [{ + value: 54.2 + rate: 1 + }] + statistic: "summary" + } + } + }] + }, { + title: "Set" + notes: """ + In this example we'll demonstrate how to use sets. Sets are primarily a StatsD concept + that represents the number of unique values seen for a given metric.
+ The idea is that you pass the unique/high-cardinality value as the metric value + and the metric store will count the number of unique values seen. + """ + configuration: { + metrics: [{ + type: "set" + field: "remote_addr" + namespace: "{{branch}}" + tags: { + host: "{{host}}" + } + }] + type: null + inputs: null + } + input: { + log: { + host: "10.22.11.222" + message: "Sent 200 in 54.2ms" + remote_addr: "233.221.232.22" + branch: "dev" + } + } + output: [{ + metric: { + kind: "incremental" + name: "remote_addr" + namespace: "dev" + tags: { + host: "10.22.11.222" + } + set: { + values: ["233.221.232.22"] + } + } + }] + }] + how_it_works: { + multiple_metrics: { + #Subsection: { + title: string + body: string + } + name: "multiple_metrics" + title: "Multiple Metrics" + body: """ + For clarification, when you convert a single `log` event into multiple `metric` + events, the `metric` events are not emitted as a single array. They are emitted + individually, and the downstream components treat them as individual events. + Downstream components are not aware they were derived from a single log event. + """ + } + reducing: { + #Subsection: { + title: string + body: string + } + name: "reducing" + title: "Reducing" + body: """ + It's important to understand that this transform does not reduce multiple logs + to a single metric. Instead, this transform converts logs into granular + individual metrics that can then be reduced at the edge. Where the reduction + happens depends on your metrics storage. For example, the + [`prometheus_exporter` sink][docs.sinks.prometheus_exporter] will reduce logs in the sink itself + for the next scrape, while other metrics sinks will proceed to forward the + individual metrics for reduction in the metrics storage itself. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + null_fields: { + #Subsection: { + title: string + body: string + } + name: "null_fields" + title: "Null Fields" + body: """ + If the target log `field` contains a `null` value it will be ignored, and a metric + will not be emitted. + """ + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse."
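+# To show where that reduction happens in practice, here is a sketch (in CUE,
+# with hypothetical component IDs and an illustrative listen address) of a
+# pipeline that pairs this transform with the `prometheus_exporter` sink:
+#
+# ```cue
+# transforms: requests_to_metrics: {
+# 	type:   "log_to_metric"
+# 	inputs: ["app_logs"]
+# 	metrics: [{
+# 		type:  "counter"
+# 		field: "status"
+# 		name:  "response_total"
+# 		tags: status: "{{status}}"
+# 	}]
+# }
+#
+# // The exporter holds the granular counters and reduces them
+# // for each Prometheus scrape.
+# sinks: metrics_out: {
+# 	type:    "prometheus_exporter"
+# 	inputs:  ["requests_to_metrics"]
+# 	address: "0.0.0.0:9598"
+# }
+# ```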
+ value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + aws_cloudwatch_logs_subscription_parser: { + kind: "transform" + title: "AWS CloudWatch Logs Subscription Parser" + description: """ + Parses AWS CloudWatch Logs events (configured through AWS Cloudwatch + subscriptions) coming from the `aws_kinesis_firehose` source. + """ + classes: { + commonly_used: false + development: "deprecated" + egress_method: "batch" + stateful: false + } + features: { + parse: { + format: { + name: "AWS CloudWatch Logs subscription events" + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html" + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = parse_aws_cloudwatch_log_subscription_message(.message) + ``` + """] + notices: [] + } + configuration: { + field: { + common: true + description: "The log field to decode as an AWS CloudWatch Logs Subscription JSON event. The field must hold a string value." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. 
This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_cloudwatch_logs_subscription_parser: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + output: { + logs: { + line: { + description: "One event will be published per log event in the subscription message." + name: "line" + fields: { + timestamp: { + description: "The timestamp of the log event." + name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + message: { + description: "The body of the log event." + name: "message" + required: true + warnings: [] + type: { + string: { + examples: ["hello", "{\"key\": \"value\"}"] + syntax: "literal" + } + } + } + id: { + description: "The CloudWatch Logs event id." + name: "id" + required: true + warnings: [] + type: { + string: { + examples: ["35683658089614582423604394983260738922885519999578275840"] + syntax: "literal" + } + } + } + log_group: { + description: "The log group the event came from." + name: "log_group" + required: true + warnings: [] + type: { + string: { + examples: ["/lambda/test"] + syntax: "literal" + } + } + } + log_stream: { + description: "The log stream the event came from." + name: "log_stream" + required: true + warnings: [] + type: { + string: { + examples: ["2020/03/24/[$LATEST]794dbaf40a7846c4984ad80ebf110544"] + syntax: "literal" + } + } + } + owner: { + description: "The ID of the AWS account the logs came from." + name: "owner" + required: true + warnings: [] + type: { + string: { + examples: ["111111111111"] + syntax: "literal" + } + } + } + subscription_filters: { + description: "The list of subscription filter names that the logs were sent by." 
+ name: "subscription_filters" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["Destination"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + env_vars: {} + type: "aws_cloudwatch_logs_subscription_parser" + #ExampleConfig: { + title: string + configuration: { + field: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Default" + configuration: { + field: "message" + type: null + inputs: null + } + input: { + log: { + message: """ + \t{ + \t "messageType": "DATA_MESSAGE", + \t "owner": "111111111111", + \t "logGroup": "test", + \t "logStream": "test", + \t "subscriptionFilters": [ + \t\t"Destination" + \t ], + \t "logEvents": [ + \t\t{ + \t\t "id": "35683658089614582423604394983260738922885519999578275840", + \t\t "timestamp": 1600110569039, + \t\t "message": "{"bytes":26780,"datetime":"14/Sep/2020:11:45:41 -0400","host":"157.130.216.193","method":"PUT","protocol":"HTTP/1.0","referer":"https://www.principalcross-platform.io/markets/ubiquitous","request":"/expedite/convergence","source_type":"stdin","status":301,"user-identifier":"-"}" + \t\t}, + \t\t{ + \t\t "id": "35683658089659183914001456229543810359430816722590236673", + \t\t "timestamp": 1600110569041, + \t\t "message": "{"bytes":17707,"datetime":"14/Sep/2020:11:45:41 -0400","host":"109.81.244.252","method":"GET","protocol":"HTTP/2.0","referer":"http://www.investormission-critical.io/24/7/vortals","request":"/scale/functionalities/optimize","source_type":"stdin","status":502,"user-identifier":"feeney1708"}" + \t\t} + \t ] + \t} + """ + } + } + output: { + log: { + id: "35683658089614582423604394983260738922885519999578275840" + log_group: "test" + log_stream: "test" + message: "{\"bytes\":26780,\"datetime\":\"14/Sep/2020:11:45:41 -0400\",\"host\":\"157.130.216.193\",\"method\":\"PUT\",\"protocol\":\"HTTP/1.0\",\"referer\":\"https://www.principalcross-latform.io/markets/ubiquitous\",\"request\":\"/expedite/convergence\",\"source_type\":\"stdin\",\"status\":301,\"user-identifier\":\"-\"}" + owner: "111111111111" + timestamp: "2020-09-14T19:09:29.039Z" + subscription_filters: ["Destination"] + } + } + }] + how_it_works: { + structured_events: { + #Subsection: { + title: string + body: string + } + name: "structured_events" + title: "Structured Log Events" + body: "Note that the events themselves are not parsed. If they are structured data, you will typically want to pass them through a [parsing transform](https://vector.dev/components/?functions%5B%5D=parse)." + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + lua: { + kind: "transform" + output: {} + title: "Lua" + description: "Transform events with a full embedded [Lua](https://www.lua.org) 5.3.5 engine." + classes: { + commonly_used: false + development: "stable" + egress_method: "stream" + stateful: true + } + features: { + program: { + runtime: { + name: "Lua" + url: "https://www.lua.org" + version: "5.3" + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + The `lua` transform is ~60% slower than the [`remap` transform](https://vector.dev/docs/reference/transforms/remap/), so we + recommend that you use the `remap` transform whenever possible. The `lua` transform is + designed solely for edge cases not covered by the `remap` transform and not as a go-to option. If the + `remap` transform doesn't cover your use case, please [open an issue](https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature) and let + us know. + """] + notices: [] + } + configuration: { + hooks: { + category: "Hooks" + description: "Configures hooks handlers." + name: "hooks" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + init: { + common: false + description: "A function which is called when the first event arrives, before `hooks.process` is called." + name: "init" + required: false + warnings: [] + type: { + string: { + default: null + examples: [""" + function (emit) + -- Custom Lua code here + end + """, "init"] + syntax: "literal" + } + } + } + process: { + description: "A function which is called for each incoming event. It can produce new events using the `emit` function."
+ name: "process" + required: true + warnings: [] + type: { + string: { + examples: [""" + function (event, emit) + event.log.field = "value" -- set value of a field + event.log.another_field = nil -- remove field + event.log.first, event.log.second = nil, event.log.first -- rename field + -- Very important! Emit the processed event. + emit(event) + end + """, "process"] + syntax: "literal" + } + } + } + shutdown: { + common: false + description: "A function which is called when Vector is stopped. It can produce new events using the `emit` function." + name: "shutdown" + required: false + warnings: [] + type: { + string: { + default: null + examples: [""" + function (emit) + -- Custom Lua code here + end + """, "shutdown"] + syntax: "literal" + } + } + } + } + } + } + } + search_dirs: { + common: false + description: "A list of directories to search when loading a Lua file via the `require` function. If not specified, the modules are looked up in the directories of Vector's configs." + name: "search_dirs" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["/etc/vector/lua"] + syntax: "literal" + } + } + } + } + } + } + source: { + common: false + description: "The source which is evaluated when the transform is created." + name: "source" + required: false + warnings: [] + type: { + string: { + default: null + examples: [""" + function init() + count = 0 + end + + function process() + count = count + 1 + end + + function timer_handler(emit) + emit(make_counter(count)) + count = 0 + end + + function shutdown(emit) + emit(make_counter(count)) + end + + function make_counter(value) + return { + metric = { + name = "event_counter", + kind = "incremental", + timestamp = os.date("!*t"), + counter = { + value = value + } + } + } + end + """, """ + -- external file with hooks and timers defined + require('custom_module') + """] + syntax: "literal" + } + } + } + timers: { + common: false + description: "Configures timers which are executed periodically at a given interval." + name: "timers" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + object: { + examples: [] + options: { + handler: { + description: "Defines a handler function which is executed periodically at `interval_seconds`. It can produce new events using the `emit` function." + name: "handler" + required: true + warnings: [] + type: { + string: { + examples: ["timer_handler"] + syntax: "literal" + } + } + } + interval_seconds: { + description: "Defines the interval at which the timer handler will be executed." + name: "interval_seconds" + required: true + warnings: [] + type: { + uint: { + examples: [1, 10, 30] + unit: "seconds" + } + } + } + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + version: { + description: "Transform API version. Specifying this version ensures that Vector does not break backward compatibility."
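+# Pulling the options above together, a minimal sketch of a version-2 `lua`
+# transform with a single `process` hook (component IDs and field names are
+# hypothetical):
+#
+# ```cue
+# transforms: tag_with_lua: {
+# 	type:    "lua"
+# 	version: "2"
+# 	inputs:  ["app_logs"]
+# 	hooks: process: """
+# 		function (event, emit)
+# 			event.log.processed_by = "lua" -- hypothetical field
+# 			emit(event)
+# 		end
+# 		"""
+# }
+# ```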
+ name: "version" + required: true + warnings: [] + type: { + string: { + enum: { + "2": "Lua transform API version 2" + } + examples: ["2"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + lua: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "lua" + #ExampleConfig: { + title: string + configuration: { + hooks: null + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Add, rename, & remove log fields" + configuration: { + hooks: { + process: """ + function (event, emit) + \t-- Add root level field + \tevent.log.field = "new value" + + \t-- Add nested field + \tevent.log.nested.field = "nested value" + + \t-- Rename field + \tevent.log.renamed_field = event.log.field_to_rename + \tevent.log.field_to_rename = nil + + \t-- Remove fields + \tevent.log.field_to_remove = nil + + \temit(event) + end + """ + } + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: { + log: { + field_to_rename: "old value" + field_to_remove: "remove me" + } + } + output: { + log: { + field: "new value" + nested: { + field: "nested value" + } + renamed_field: "old value" + } + } + }, { + title: "Add, rename, remove metric tags" + configuration: { + hooks: { + process: """ + function (event, emit) + \t-- Add tag + \tevent.metric.tags.tag = "new value" + + \t-- Rename tag + \tevent.metric.tags.renamed_tag = event.log.tag_to_rename + \tevent.metric.tags.tag_to_rename = nil + + \t-- Remove tag + \tevent.metric.tags.tag_to_remove = nil + + \temit(event) + end + """ + } + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: { + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: { + tag_to_rename: "old value" + tag_to_remove: "remove me" + } + } + } + output: { + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 2.0 + } + tags: { + tag: "new value" + renamed_tag: "old value" + } + } + } + }, { + title: "Drop an event" + configuration: { + hooks: { + process: """ + function (event, emit) + \t-- Drop event entirely by not calling the `emit` function + end + """ + } + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: { + log: { + field_to_rename: "old value" + field_to_remove: "remove me" + } + } + output: null + }, { + title: "Iterate over log fields" + configuration: { + hooks: { + process: """ + function (event, emit) + \t-- Remove all fields where the value is "-" + \tfor f, v in pairs(event) do + \t\tif v == "-" then + \t\t\tevent[f] = nil + \t\tend + \tend + + \temit(event) + end + """ + } + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: { + log: { + value_to_remove: "-" + value_to_keep: "keep" + } + } + output: { + log: { + value_to_keep: "keep" + } + } + }, { + title: "Parse timestamps" + configuration: { + hooks: { + init: """ + -- Parse timestamps like `2020-04-07 06:26:02.643` + timestamp_pattern = 
"(%d%d%d%d)[-](%d%d)[-](%d%d) (%d%d):(%d%d):(%d%d).?(%d*)" + + function parse_timestamp(str) + \tlocal year, month, day, hour, min, sec, millis = string.match(str, timestamp_pattern) + \tlocal ms = 0 + \tif millis and millis ~= "" then + \t\tms = tonumber(millis) + \tend + \treturn { + \t\tyear = tonumber(year), + \t\tmonth = tonumber(month), + \t\tday = tonumber(day), + \t\thour = tonumber(hour), + \t\tmin = tonumber(min), + \t\tsec = tonumber(sec), + \t\tnanosec = ms * 1000000 + \t} + end + """ + process: """ + function (event, emit) + \tevent.log.timestamp = parse_timestamp(event.log.timestamp_string) + \temit(event) + end + """ + } + search_dirs: null + source: null + timers: null + version: null + type: null + inputs: null + } + input: { + log: { + timestamp_string: "2020-04-07 06:26:02.643" + } + } + output: { + log: { + timestamp_string: "2020-04-07 06:26:02.643" + timestamp: "2020-04-07 06:26:02.643" + } + } + }, { + title: "Count the number of logs" + configuration: { + hooks: { + init: "init" + process: "process" + shutdown: "shutdown" + } + timers: [{ + interval_seconds: 5 + handler: "timer_handler" + }] + search_dirs: null + version: null + type: null + source: """ + function init() + \tcount = 0 + end + + function process() + \tcount = count + 1 + end + + function timer_handler(emit) + \temit(make_counter(count)) + \tcount = 0 + end + + function shutdown(emit) + \temit(make_counter(count)) + end + + function make_counter(value) + \treturn metric = { + \t\tname = "event_counter", + \t\tkind = "incremental", + \t\ttimestamp = os.date("!*t"), + \t\tcounter = { + \t\t\tvalue = value + \t\t} + \t} + end + """ + inputs: null + } + input: { + log: {} + } + output: { + metric: { + kind: "incremental" + name: "event_counter" + counter: { + value: 1.0 + } + tags: { + tag: "new value" + renamed_tag: "old value" + } + } + } + }] + how_it_works: { + event_data_model: { + #Subsection: { + title: string + body: string + } + name: "event_data_model" + title: "Event Data Model" + body: """ + The `process` hook takes an `event` as its first argument. + Events are represented as [tables](https://www.lua.org/pil/2.5.html) in Lua + and follow Vector's data model exactly. Please refer to + Vector's [data model reference][docs.data-model] for the event + schema. How Vector's types map to Lua's type are covered below. 
+ """ + sub_sections: [{ + title: "Type Mappings" + body: """ + The correspondence between Vector's [data types](https://vector.dev/docs/about/data-model/log/#types) and Lua data type is summarized + by the following table: + + | Vector Type | Lua Type | Comment | + |:----------------------------------------------------|:--------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | [`String`](https://vector.dev/docs/about/data-model/log/#strings) | [`string`](https://www.lua.org/pil/2.4.html) | | + | [`Integer`](https://vector.dev/docs/about/data-model/log/#ints) | [`integer`](https://docs.rs/rlua/latest/rlua/type.Integer.html) | | + | [`Float`](https://vector.dev/docs/about/data-model/log/#floats) | [`number`](https://docs.rs/rlua/latest/rlua/type.Number.html) | | + | [`Boolean`](https://vector.dev/docs/about/data-model/log/#booleans) | [`boolean`](https://www.lua.org/pil/2.2.html) | | + | [`Timestamp`](https://vector.dev/docs/about/data-model/log/#timestamps) | [`table`](https://www.lua.org/pil/2.5.html) | There is no dedicated timestamp type in Lua. Timestamps are represented as tables using the convention defined by [`os.date`](https://www.lua.org/manual/5.3/manual.html#pdf-os.date) and [`os.time`](https://www.lua.org/manual/5.3/manual.html#pdf-os.time). The table representation of a timestamp contains the fields `year`, `month`, `day`, `hour`, `min`, `sec`, `nanosec`, `yday`, `wday`, and `isdst`. If such a table is passed from Lua to Vector, the fields `yday`, `wday`, and `isdst` can be omitted. In addition to the `os.time` representation, Vector supports sub-second resolution with a `nanosec` field in the table. | + | [`Null`](https://vector.dev/docs/about/data-model/log/#null-values) | empty string | In Lua setting the value of a table field to `nil` means deletion of this field. In addition, the length operator `#` does not work in the expected way with sequences containing nulls. Because of that `Null` values are encoded as empty strings. | + | [`Map`](https://vector.dev/docs/about/data-model/log/#maps) | [`table`](https://www.lua.org/pil/2.5.html) | | + | [`Array`](https://vector.dev/docs/about/data-model/log/#arrays) | [`sequence`](https://www.lua.org/pil/11.1.html) | Sequences are a special case of tables. Indexes start from 1, following the Lua convention. | + """ + }] + } + learning_lua: { + #Subsection: { + title: string + body: string + } + name: "learning_lua" + title: "Learning Lua" + body: """ + In order to write non-trivial transforms in Lua, one has to have + basic understanding of Lua. Because Lua is an easy to learn + language, reading a few first chapters of + [the official book](https://www.lua.org/pil/) or consulting + [the manual](https://www.lua.org/manual/5.3/manual.html) would suffice. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). 
+ State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + search_dirs: { + #Subsection: { + title: string + body: string + } + name: "search_dirs" + title: "Search Directories" + body: """ + Vector provides a `search_dirs` option that allows you to specify + absolute paths that will be searched when using the + [Lua `require` function](https://www.lua.org/manual/5.3/manual.html#pdf-require). If this option is not + set, the directories of the configuration files will be used instead. + """ + } + } + telemetry: { + metrics: { + memory_used_bytes: { + description: "The total memory currently being used by Vector (in bytes)." + type: "gauge" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "memory_used_bytes" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
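+ # A minimal sketch of how the `search_dirs` behavior described above might be
+ # wired up in TOML; the component IDs and the `util` module are illustrative,
+ # not part of the recorded output:
+ #
+ #   [transforms.my_lua]
+ #   type = "lua"
+ #   version = "2"
+ #   inputs = ["my-source-id"]
+ #   search_dirs = ["/etc/vector/lua"]  # absolute paths searched by `require`
+ #   hooks.init = '''
+ #   util = require("util")             -- resolved to /etc/vector/lua/util.lua
+ #   '''
+ #   hooks.process = '''
+ #   function (event, emit)
+ #     event.log.host = util.normalize_host(event.log.host)
+ #     emit(event)
+ #   end
+ #   '''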
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ error_type: {
+ name: "error_type"
+ description: "The type of the error"
+ required: true
+ enum: {
+ field_missing: "The event field was missing."
+ invalid_metric: "The metric was invalid."
+ mapping_failed: "The mapping failed."
+ match_failed: "The match operation failed."
+ parse_failed: "The parsing operation failed."
+ render_error: "The rendering operation failed."
+ type_conversion_failed: "The type conversion operation failed."
+ type_field_does_not_exist: "The type field does not exist."
+ type_ip_address_parse_error: "The IP address did not parse."
+ value_invalid: "The value was invalid."
+ }
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processing_errors_total"
+ }
+ events_out_total: {
+ description: "The total number of events emitted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ processed_bytes_total: {
+ description: "The total number of bytes processed by the component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
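+ # Per the event data model notes above, events cross the Lua boundary as
+ # tables keyed by event kind — a sketch (field values illustrative; assumes
+ # the metric already has a `tags` table):
+ #
+ #   hooks.process = '''
+ #   function (event, emit)
+ #     if event.log then
+ #       event.log.note = "log events look like { log = { ... } }"
+ #     elseif event.metric then
+ #       event.metric.tags.note = "metric events look like { metric = { ... } }"
+ #     end
+ #     emit(event)
+ #   end
+ #   '''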
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + rename_fields: { + kind: "transform" + output: {} + title: "Rename Fields" + description: "Renames one or more log fields." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .new_name = del(.old_name) + ``` + """] + notices: [] + } + configuration: { + drop_empty: { + common: false + description: "If set to `true`, after renaming fields, remove any parent objects of the old field that are now empty." + name: "drop_empty" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + fields: { + category: "Fields" + description: "A table of old-key/new-key pairs representing the keys to be moved in the event." + name: "fields" + required: true + warnings: [""" + Vector makes no guarantee on the order of execution. If two rename + operations must be performed in a specific order, it is recommended to + split them up across two separate rename transforms. + """] + type: { + object: { + examples: [{ + old_field_name: "new_field_name" + parent: { + old_child_name: "parent.new_child_name" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + rename_fields: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "rename_fields" + #ExampleConfig: { + title: string + configuration: { + drop_empty: null + fields: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + conflicts: { + #Subsection: { + title: string + body: string + } + name: "conflicts" + title: "Conflicts" + body: "" + sub_sections: [{ + title: "Key Conflicts" + body: """ + Keys specified in this transform will replace existing keys. + + + + Vector makes no guarantee on the order of execution. 
If two rename
+ operations must be performed in a specific order, it is recommended to split
+ them up across two separate rename transforms.
+ """
+ }, {
+ title: "Nested Key Conflicts"
+ body: """
+ Keys are renamed in a deep fashion. They will not replace any ancestor
+ objects. For example, given the following `log` event:
+
+ ```javascript
+ {
+ "root": "value2",
+ "parent": {
+ "child1": "value1"
+ }
+ }
+ ```
+
+ And the following configuration:
+
+ ```toml
+ [transforms.rename_nested_field]
+ type = "rename_fields"
+ fields.root = "parent.child2"
+ ```
+
+ Will result in the following log event:
+
+ ```javascript
+ {
+ "parent": {
+ "child1": "value1",
+ "child2": "value2"
+ }
+ }
+ ```
+
+ Notice that the `parent.child1` field was preserved.
+ """
+ }]
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ }
+ telemetry: {
+ metrics: {
+ events_in_total: {
+ description: "The total number of events accepted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_in_total"
+ }
+ processed_events_total: {
+ description: "The total number of events processed by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ file: {
+ name: "file"
+ description: "The file that produced the error"
+ required: false
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processed_events_total"
+ }
+ events_out_total: {
+ description: "The total number of events emitted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
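+ # Pulling the `rename_fields` options together — a sketch (component IDs are
+ # illustrative; the field pairs mirror the recorded `fields` example):
+ #
+ #   [transforms.my_rename]
+ #   type = "rename_fields"
+ #   inputs = ["my-source-id"]
+ #   drop_empty = true   # prune parent objects emptied by the move
+ #   fields.old_field_name = "new_field_name"
+ #   fields.parent.old_child_name = "parent.new_child_name"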
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + remove_tags: { + kind: "transform" + output: {} + title: "Remove Tags" + description: "Removes one or more metric tags." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + del(.tag) + ``` + """] + notices: [] + } + configuration: { + tags: { + description: "The tag names to drop." + name: "tags" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["tag1", "tag2"] + syntax: "literal" + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
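+ # A sketch of a `remove_tags` config (IDs illustrative), with a remap
+ # equivalent per the deprecation warning above — assuming metric tags are
+ # addressed as `.tags.<name>` in VRL:
+ #
+ #   [transforms.my_remove_tags]
+ #   type = "remove_tags"
+ #   inputs = ["my-source-id"]
+ #   tags = ["tag1", "tag2"]
+ #
+ #   [transforms.my_remove_tags_vrl]
+ #   type = "remap"
+ #   inputs = ["my-source-id"]
+ #   source = '''
+ #   del(.tags.tag1)
+ #   del(.tags.tag2)
+ #   '''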
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + remove_tags: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "remove_tags" + #ExampleConfig: { + title: string + configuration: { + tags: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ processed_bytes_total: {
+ description: "The total number of bytes processed by the component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processed_bytes_total"
+ }
+ }
+ }
+ }
+ coercer: {
+ kind: "transform"
+ output: {}
+ title: "Coercer"
+ description: "Coerces log fields into typed values."
+ classes: {
+ commonly_used: false
+ development: "deprecated"
+ egress_method: "stream"
+ stateful: false
+ }
+ features: {
+ shape: {}
+ descriptions: {}
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: ["""
+ This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/)
+ transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to
+ create transform logic of any degree of complexity. The examples below show how you can use VRL to
+ replace this transform's functionality.
+
+ ```vrl
+ .bool = to_bool("false")
+ .float = to_float("1.0")
+ .int = to_int("1")
+ .string = to_string(1)
+ .timestamp = to_timestamp("2021-01-15T12:33:22.213221Z")
+ ```
+ """]
+ notices: []
+ }
+ input: {
+ logs: true
+ metrics: null
+ }
+ configuration: {
+ drop_unspecified: {
+ common: false
+ description: "Set to `true` to drop all fields that are not specified in the `types` table. Make sure both `message` and `timestamp` are specified in the `types` table as their absence will cause the original message data to be dropped along with other extraneous fields."
+ name: "drop_unspecified"
+ required: false
+ warnings: []
+ type: {
+ bool: {
+ default: false
+ }
+ }
+ }
+ timezone: {
+ common: false
+ description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time."
+ name: "timezone"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "local"
+ examples: ["local", "America/New_York", "EST5EDT"]
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
+ """
+ name: "inputs"
+ required: true
+ warnings: []
+ sort: -1
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["my-source-or-transform-id", "prefix-*"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ types: {
+ common: true
+ category: "Types"
+ description: """
+ Key/value pairs representing mapped log field names and types. This is used to
+ coerce log fields from strings into their proper types. The available types are
+ listed in the **Types** list below.
+
+ Timestamp coercions need to be prefaced with `timestamp|`, for example
+ `"timestamp|%F"`. Timestamp specifiers can use either of the following:
+
+ 1. One of the built-in formats listed in the **Timestamp Formats** table below.
+ 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's
+ `chrono` library.
+
+ ### Types
+
+ * `array`
+ * `bool`
+ * `bytes`
+ * `float`
+ * `int`
+ * `map`
+ * `null`
+ * `timestamp` (see the table below for formats)
+
+ ### Timestamp Formats
+
+ Format | Description | Example
+ :------|:------------|:-------
+ `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54`
+ `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54`
+ `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54`
+ `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54`
+ `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020`
+ `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020`
+ `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274`
+ `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z`
+ `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC with time zone | `2020-12-01T02:37:54-07:00`
+ `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020`
+ `%a %d %b %T %z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020`
+ `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020`
+
+ **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time.
+ """
+ name: "types"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: [{
+ status: "int"
+ duration: "float"
+ success: "bool"
+ timestamp_iso8601: "timestamp|%F"
+ timestamp_custom: "timestamp|%a %b %e %T %Y"
+ timestamp_unix: "timestamp|%F %T"
+ parent: {
+ child: "int"
+ }
+ }]
+ options: {}
+ }
+ }
+ }
+ type: {
+ description: "The component type. This is a required field for all components and tells Vector which component to use."
+ name: "type"
+ required: true
+ warnings: []
+ sort: -2
+ type: {
+ string: {
+ enum: {
+ coercer: "The type of this component."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ env_vars: {}
+ type: "coercer"
+ #ExampleConfig: {
+ title: string
+ configuration: {
+ drop_unspecified: null
+ timezone: null
+ types: null
+ type: null
+ inputs: null
+ }
+ input: {} | {} | [{} | {}]
+ output: {} | {} | [{} | {}] | null
+ }
+ examples: [{
+ title: "Date"
+ configuration: {
+ types: {
+ bytes_in: "int"
+ bytes_out: "int"
+ status: "int"
+ timestamp: "timestamp|%d/%m/%Y:%H:%M:%S %z"
+ }
+ drop_unspecified: null
+ timezone: null
+ type: null
+ inputs: null
+ }
+ input: {
+ log: {
+ bytes_in: "5667"
+ bytes_out: "20574"
+ host: "5.86.210.12"
+ message: "GET /embrace/supply-chains/dynamic/vertical"
+ status: "201"
+ timestamp: "19/06/2019:17:20:49 -0400"
+ user_id: "zieme4647"
+ }
+ }
+ output: {
+ log: {
+ bytes_in: 5667
+ bytes_out: 20574
+ host: "5.86.210.12"
+ message: "GET /embrace/supply-chains/dynamic/vertical"
+ status: 201
+ timestamp: "19/06/2019:17:20:49 -0400"
+ user_id: "zieme4647"
+ }
+ }
+ }]
+ telemetry: {
+ metrics: {
+ processing_errors_total: {
+ description: "The total number of processing errors encountered by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ error_type: {
+ name: "error_type"
+ description: "The type of the error"
+ required: true
+ enum: {
+ field_missing: "The event field was missing."
+ invalid_metric: "The metric was invalid."
+ mapping_failed: "The mapping failed."
+ match_failed: "The match operation failed."
+ parse_failed: "The parsing operation failed."
+ render_error: "The rendering operation failed."
+ type_conversion_failed: "The type conversion operation failed."
+ type_field_does_not_exist: "The type field does not exist."
+ type_ip_address_parse_error: "The IP address did not parse."
+ value_invalid: "The value was invalid."
+ }
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processing_errors_total"
+ }
+ events_in_total: {
+ description: "The total number of events accepted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
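+ # A sketch of a `coercer` config exercising the options above; the component
+ # IDs are illustrative, and the field/type pairs mirror the recorded examples:
+ #
+ #   [transforms.my_coercer]
+ #   type = "coercer"
+ #   inputs = ["my-source-id"]
+ #   drop_unspecified = false
+ #   timezone = "America/New_York"
+ #   types.status = "int"
+ #   types.duration = "float"
+ #   types.success = "bool"
+ #   types.timestamp_iso8601 = "timestamp|%F"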
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
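+ # Following the deprecation warning above, the "Date" example could be
+ # rewritten as a `remap` transform — a sketch (IDs illustrative; the `!`
+ # error-raising variants and the `parse_timestamp` format argument are
+ # assumptions based on the VRL function reference):
+ #
+ #   [transforms.coerce_dates]
+ #   type = "remap"
+ #   inputs = ["my-source-id"]
+ #   source = '''
+ #   .bytes_in = to_int!(.bytes_in)
+ #   .bytes_out = to_int!(.bytes_out)
+ #   .status = to_int!(.status)
+ #   .timestamp = parse_timestamp!(.timestamp, "%d/%m/%Y:%H:%M:%S %z")
+ #   '''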
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processed_bytes_total"
+ }
+ }
+ }
+ how_it_works: {
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ }
+ }
+ tokenizer: {
+ kind: "transform"
+ output: {}
+ title: "Tokenizer"
+ description: """
+ Tokenizes a field's value by splitting on white space, ignoring special
+ wrapping characters, and zipping the tokens into ordered field names.
+ """
+ classes: {
+ commonly_used: true
+ development: "deprecated"
+ egress_method: "stream"
+ stateful: false
+ }
+ features: {
+ parse: {
+ format: {
+ name: "Token Format"
+ url: null
+ versions: null
+ }
+ }
+ descriptions: {}
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: ["""
+ This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/)
+ transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to
+ create transform logic of any degree of complexity. The examples below show how you can use VRL to
+ replace this transform's functionality.
+
+ ```vrl
+ .message = parse_tokens(.message)
+ ```
+ """]
+ notices: []
+ }
+ configuration: {
+ drop_field: {
+ common: true
+ description: "If `true`, the `field` will be dropped after parsing."
+ name: "drop_field"
+ required: false
+ warnings: []
+ type: {
+ bool: {
+ default: true
+ }
+ }
+ }
+ field: {
+ common: true
+ description: "The log field to tokenize."
+ name: "field"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "message"
+ examples: ["message", "parent.child"]
+ syntax: "literal"
+ }
+ }
+ }
+ field_names: {
+ description: "The log field names assigned to the resulting tokens, in order."
+ name: "field_names"
+ required: true
+ warnings: []
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["timestamp", "level", "message", "parent.child"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ timezone: {
+ common: false
+ description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time."
+ name: "timezone"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "local"
+ examples: ["local", "America/New_York", "EST5EDT"]
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
+ """
+ name: "inputs"
+ required: true
+ warnings: []
+ sort: -1
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["my-source-or-transform-id", "prefix-*"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ types: {
+ common: true
+ category: "Types"
+ description: """
+ Key/value pairs representing mapped log field names and types. This is used to
+ coerce log fields from strings into their proper types. The available types are
+ listed in the **Types** list below.
+
+ Timestamp coercions need to be prefaced with `timestamp|`, for example
+ `"timestamp|%F"`. Timestamp specifiers can use either of the following:
+
+ 1. One of the built-in formats listed in the **Timestamp Formats** table below.
+ 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's
+ `chrono` library.
+
+ ### Types
+
+ * `array`
+ * `bool`
+ * `bytes`
+ * `float`
+ * `int`
+ * `map`
+ * `null`
+ * `timestamp` (see the table below for formats)
+
+ ### Timestamp Formats
+
+ Format | Description | Example
+ :------|:------------|:-------
+ `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54`
+ `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54`
+ `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54`
+ `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54`
+ `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020`
+ `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020`
+ `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274`
+ `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z`
+ `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC with time zone | `2020-12-01T02:37:54-07:00`
+ `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020`
+ `%a %d %b %T %z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020`
+ `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020`
+
+ **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time.
+ """
+ name: "types"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: [{
+ status: "int"
+ duration: "float"
+ success: "bool"
+ timestamp_iso8601: "timestamp|%F"
+ timestamp_custom: "timestamp|%a %b %e %T %Y"
+ timestamp_unix: "timestamp|%F %T"
+ parent: {
+ child: "int"
+ }
+ }]
+ options: {}
+ }
+ }
+ }
+ type: {
+ description: "The component type. This is a required field for all components and tells Vector which component to use."
+ name: "type"
+ required: true
+ warnings: []
+ sort: -2
+ type: {
+ string: {
+ enum: {
+ tokenizer: "The type of this component."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ env_vars: {}
+ type: "tokenizer"
+ #ExampleConfig: {
+ title: string
+ configuration: {
+ drop_field: null
+ field: null
+ field_names: null
+ timezone: null
+ types: null
+ type: null
+ inputs: null
+ }
+ input: {} | {} | [{} | {}]
+ output: {} | {} | [{} | {}] | null
+ }
+ examples: [{
+ title: "Loosely Structured"
+ configuration: {
+ field: "message"
+ field_names: ["remote_addr", "ident", "user_id", "timestamp", "message", "status", "bytes"]
+ drop_field: null
+ timezone: null
+ type: null
+ types: {
+ timestamp: "timestamp"
+ status: "int"
+ bytes: "int"
+ }
+ inputs: null
+ }
+ input: {
+ log: {
+ message: "5.86.210.12 - zieme4647 [19/06/2019:17:20:49 -0400] \"GET /embrace/supply-chains/dynamic/vertical\" 201 20574"
+ }
+ }
+ output: {
+ log: {
+ remote_addr: "5.86.210.12"
+ user_id: "zieme4647"
+ timestamp: "19/06/2019:17:20:49 -0400"
+ message: "GET /embrace/supply-chains/dynamic/vertical"
+ status: 201
+ bytes: 20574
+ }
+ }
+ }]
+ input: {
+ logs: true
+ metrics: null
+ }
+ how_it_works: {
+ blank_values: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "blank_values"
+ title: "Blank Values"
+ body: """
+ Both `" "` and `"-"` are considered blank values and their mapped fields will
+ be set to `null`.
+ """
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ special_characters: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "special_characters"
+ title: "Special Characters"
+ body: """
+ In order to extract raw values and remove wrapping characters, we must treat
+ certain characters as special. These characters will be discarded:
+
+ * `"..."` - Quotes are used to wrap phrases. Spaces are preserved, but the wrapping quotes will be discarded.
+ * `[...]` - Brackets are used to wrap phrases. Spaces are preserved, but the wrapping brackets will be discarded.
+ * `\\` - Can be used to escape the above characters; Vector will treat them as literals.
+ """
+ }
+ }
+ telemetry: {
+ metrics: {
+ processing_errors_total: {
+ description: "The total number of processing errors encountered by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ error_type: {
+ name: "error_type"
+ description: "The type of the error"
+ required: true
+ enum: {
+ field_missing: "The event field was missing."
+ invalid_metric: "The metric was invalid."
+ mapping_failed: "The mapping failed."
+ match_failed: "The match operation failed."
+ parse_failed: "The parsing operation failed."
+ render_error: "The rendering operation failed."
+ type_conversion_failed: "The type conversion operation failed."
+ type_field_does_not_exist: "The type field does not exist."
+ type_ip_address_parse_error: "The IP address did not parse."
+ value_invalid: "The value was invalid."
+ } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
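+ # Pulling the `tokenizer` options together — a sketch mirroring the "Loosely
+ # Structured" example above (IDs illustrative):
+ #
+ #   [transforms.my_tokenizer]
+ #   type = "tokenizer"
+ #   inputs = ["my-source-id"]
+ #   field = "message"
+ #   field_names = ["remote_addr", "ident", "user_id", "timestamp", "message", "status", "bytes"]
+ #   types.status = "int"
+ #   types.bytes = "int"
+ #
+ #   # The VRL replacement from the deprecation warning, zipping tokens into
+ #   # named fields by index (assumes VRL array indexing and the fallible-call
+ #   # `!` syntax shown in the remap examples below):
+ #   [transforms.my_tokenizer_vrl]
+ #   type = "remap"
+ #   inputs = ["my-source-id"]
+ #   source = '''
+ #   tokens = parse_tokens!(.message)
+ #   .remote_addr = tokens[0]
+ #   .user_id = tokens[2]
+ #   .status = to_int!(tokens[5])
+ #   '''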
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + ansi_stripper: { + kind: "transform" + output: {} + title: "ANSI Stripper" + description: "Strips [ANSI escape sequences](https://en.wikipedia.org/wiki/ANSI_escape_code)." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + sanitize: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = strip_ansi_escape_codes(.message) + ``` + """] + notices: [] + } + configuration: { + field: { + common: true + description: "The target field to strip ANSI escape sequences from." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + ansi_stripper: "The type of this component." 
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ input: {
+ logs: true
+ metrics: null
+ }
+ env_vars: {}
+ type: "ansi_stripper"
+ #ExampleConfig: {
+ title: string
+ configuration: {
+ field: null
+ type: null
+ inputs: null
+ }
+ input: {} | {} | [{} | {}]
+ output: {} | {} | [{} | {}] | null
+ }
+ telemetry: {
+ metrics: {
+ processing_errors_total: {
+ description: "The total number of processing errors encountered by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ error_type: {
+ name: "error_type"
+ description: "The type of the error"
+ required: true
+ enum: {
+ field_missing: "The event field was missing."
+ invalid_metric: "The metric was invalid."
+ mapping_failed: "The mapping failed."
+ match_failed: "The match operation failed."
+ parse_failed: "The parsing operation failed."
+ render_error: "The rendering operation failed."
+ type_conversion_failed: "The type conversion operation failed."
+ type_field_does_not_exist: "The type field does not exist."
+ type_ip_address_parse_error: "The IP address did not parse."
+ value_invalid: "The value was invalid."
+ }
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "processing_errors_total"
+ }
+ events_in_total: {
+ description: "The total number of events accepted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_in_total"
+ }
+ processed_events_total: {
+ description: "The total number of events processed by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
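+ # A sketch of an `ansi_stripper` config alongside the VRL replacement from
+ # the deprecation warning above (IDs illustrative):
+ #
+ #   [transforms.my_ansi_stripper]
+ #   type = "ansi_stripper"
+ #   inputs = ["my-source-id"]
+ #   field = "message"
+ #
+ #   [transforms.my_ansi_stripper_vrl]
+ #   type = "remap"
+ #   inputs = ["my-source-id"]
+ #   source = ".message = strip_ansi_escape_codes(.message)"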
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + remap: { + kind: "transform" + output: {} + title: "Remap" + description: """ + Is the recommended transform for parsing, shaping, and transforming data in Vector. It implements the + [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL), an expression-oriented language designed for processing + observability data (logs and metrics) in a safe and performant manner. + + Please refer to the [VRL reference](https://vector.dev/docs/reference/vrl/) when writing VRL scripts. 
+ """ + classes: { + commonly_used: true + development: "beta" + egress_method: "stream" + stateful: false + } + features: { + program: { + runtime: { + name: "Vector Remap Language (VRL)" + url: "https://vector.dev/docs/reference/vrl/" + version: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + source: { + description: "The [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL) program to execute for each event." + name: "source" + required: true + warnings: [] + type: { + string: { + examples: [""" + . = parse_json(.message) + .new_field = "new value" + .status = to_int(.status) + .duration = parse_duration(.duration, "s") + .new_name = .old_name + del(.old_name) + """] + syntax: "remap_program" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + remap: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "remap" + #ExampleConfig: { + title: string + configuration: { + source: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Parse Syslog logs" + configuration: { + source: """ + structured = parse_syslog!(.message) + . = merge(., structured) + """ + type: null + inputs: null + } + input: { + log: { + message: "<102>1 2020-12-22T15:22:31.111Z vector-user.biz su 2666 ID389 - Something went wrong" + } + } + output: { + log: { + appname: "su" + facility: "ntp" + hostname: "vector-user.biz" + message: "Something went wrong" + msgid: "ID389" + procid: 2666 + severity: "info" + timestamp: "2020-12-22T15:22:31.111Z" + } + } + }, { + title: "Parse key/value (logfmt) logs" + configuration: { + source: """ + structured = parse_key_value!(.message) + . 
= merge(., structured) + """ + type: null + inputs: null + } + input: { + log: { + message: "@timestamp=\"Sun Jan 10 16:47:39 EST 2021\" level=info msg=\"Stopping all fetchers\" tag#production=stopping_fetchers id=ConsumerFetcherManager-1382721708341 module=kafka.consumer.ConsumerFetcherManager" + } + } + output: { + log: { + "@timestamp": "Sun Jan 10 16:47:39 EST 2021" + level: "info" + msg: "Stopping all fetchers" + "tag#production": "stopping_fetchers" + id: "ConsumerFetcherManager-1382721708341" + module: "kafka.consumer.ConsumerFetcherManager" + } + } + }, { + title: "Parse custom logs" + configuration: { + source: """ + structured = parse_regex!(.message, /^(?P<timestamp>\\d+/\\d+/\\d+ \\d+:\\d+:\\d+) \\[(?P<severity>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+):(?: \\*(?P<connid>\\d+))? (?P<message>.*)$/) + . = merge(., structured) + + # Coerce parsed fields + .timestamp = parse_timestamp(.timestamp, "%Y/%m/%d %H:%M:%S") ?? now() + .pid = to_int(.pid) + .tid = to_int(.tid) + + # Extract structured data + message_parts = split(.message, ", ", limit: 2) + structured = parse_key_value(message_parts[1], key_value_delimiter: ":", field_delimiter: ",") ?? {} + .message = message_parts[0] + . = merge(., structured) + """ + type: null + inputs: null + } + input: { + log: { + message: "2021/01/20 06:39:15 [error] 17755#17755: *3569904 open() \"/usr/share/nginx/html/test.php\" failed (2: No such file or directory), client: xxx.xxx.xxx.xxx, server: localhost, request: \"GET /test.php HTTP/1.1\", host: \"yyy.yyy.yyy.yyy\"" + } + } + output: { + log: { + timestamp: "2021/01/20 06:39:15" + severity: "error" + pid: "17755" + tid: "17755" + connid: "3569904" + message: "open() \"/usr/share/nginx/html/test.php\" failed (2: No such file or directory)" + client: "xxx.xxx.xxx.xxx" + server: "localhost" + request: "GET /test.php HTTP/1.1" + host: "yyy.yyy.yyy.yyy" + } + } + }, { + title: "Multiple parsing strategies" + configuration: { + source: """ + structured = + parse_syslog(.message) ?? + parse_common_log(.message) ?? + parse_regex!(.message, /^(?P<timestamp>\\d+/\\d+/\\d+ \\d+:\\d+:\\d+) \\[(?P<severity>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+):(?: \\*(?P<connid>\\d+))? (?P<message>.*)$/) + . = merge(., structured) + """ + type: null + inputs: null + } + input: { + log: { + message: "<102>1 2020-12-22T15:22:31.111Z vector-user.biz su 2666 ID389 - Something went wrong" + } + } + output: { + log: { + appname: "su" + facility: "ntp" + hostname: "vector-user.biz" + message: "Something went wrong" + msgid: "ID389" + procid: 2666 + severity: "info" + timestamp: "2020-12-22 15:22:31.111 UTC" + } + } + }, { + title: "Modify metric tags" + configuration: { + source: """ + .environment = get_env_var!("ENV") # add + .hostname = del(.host) # rename + del(.email) + """ + type: null + inputs: null + } + input: { + metric: { + kind: "incremental" + name: "user_login_total" + counter: { + value: 102.0 + } + tags: { + host: "my.host.com" + instance_id: "abcd1234" + email: "vic@vector.dev" + } + } + } + output: { + metric: { + kind: "incremental" + name: "user_login_total" + counter: { + value: 102.0 + } + tags: { + environment: "production" + hostname: "my.host.com" + instance_id: "abcd1234" + } + } + } + }] + how_it_works: { + remap_language: { + #Subsection: { + title: string + body: string + } + name: "remap_language" + title: "Vector Remap Language" + body: """ + The Vector Remap Language (VRL) is a restrictive, fast, and safe language we + designed specifically for mapping observability data. It avoids the need to + chain together many fundamental Vector transforms to accomplish rudimentary + reshaping of data.
+ + The intent is to offer the same robustness as a full language runtime (e.g. Lua) + without paying the performance or safety penalty. + + Learn more about Vector's Remap Language in the + [Vector Remap Language reference](https://vector.dev/docs/reference/vrl/). + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error." + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operation failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port."
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + add_fields: { + kind: "transform" + output: {} + title: "Add Fields" + description: "Adds fields to log events." 
+ classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + shape: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .severity = "crit" + .status = 200 + .success_codes = [200, 201, 202, 204] + .timestamp = now() + ``` + """] + notices: [] + } + configuration: { + fields: { + category: "Fields" + description: "A table of key/value pairs representing the keys to be added to the event." + name: "fields" + required: true + warnings: [] + type: { + object: { + examples: [{ + string_field: "string value" + env_var_field: "${ENV_VAR}" + templated_field: "{{ my_other_field }}" + int_field: 1 + float_field: 1.2 + bool_field: true + timestamp_field: "1979-05-27T00:32:00-0700" + parent: { + child_field: "child_value" + } + list_field: ["first", "second", "third"] + }] + options: { + "*": { + description: "The name of the field to add. Accepts all supported configuration types. Use `.` for adding nested fields." + name: "*" + required: true + warnings: [] + type: { + "*": {} + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + overwrite: { + common: true + description: "By default, fields will be overridden. Set this to `false` to avoid overwriting values." + name: "overwrite" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + add_fields: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "add_fields" + #ExampleConfig: { + title: string + configuration: { + fields: null + overwrite: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + conflicts: { + #Subsection: { + title: string + body: string + } + name: "conflicts" + title: "Conflicts" + body: "" + sub_sections: [{ + title: "Key Conflicts" + body: "Keys specified in this transform will replace existing keys." + }, { + title: "Nested Key Conflicts" + body: """ + Nested keys are added in a _deep_ fashion. They will not replace any ancestor + objects. 
For example, given the following `log` event: + + ```javascript + { + "parent": { + "child1": "value1" + } + } + ``` + + And the following configuration: + + ```toml + [transforms.add_nested_field] + type = "add_fields" + fields.parent.child2 = "value2" + ``` + + This will result in the following event: + + ```javascript + { + "parent": { + "child1": "value1", + "child2": "value2" + } + } + ``` + + Notice that the `parent.child1` field was preserved. + """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error." + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operation failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component."
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + aws_ec2_metadata: { + kind: "transform" + title: "AWS EC2 Metadata" + description: "Enriches log events with AWS EC2 environment metadata." 
+ classes: { + commonly_used: false + development: "stable" + egress_method: "stream" + stateful: false + } + features: { + enrich: { + from: { + service: { + name: "AWS EC2 instance metadata" + url: "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html" + versions: ">= 2" + } + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [""" + Running this transform within Docker on EC2 requires 2 network hops, but the EC2 instance metadata service allows only 1 hop by default. Users must raise this limit: + + ```bash + aws ec2 modify-instance-metadata-options --instance-id <instance-id> --http-endpoint enabled --http-put-response-hop-limit 2 + ``` + """] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + common: false + description: "Override the default EC2 Metadata endpoint." + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "http://169.254.169.254" + syntax: "literal" + } + } + } + fields: { + common: true + description: "A list of fields to include in each event." + name: "fields" + required: false + warnings: [] + type: { + array: { + default: ["instance-id", "local-hostname", "local-ipv4", "public-hostname", "public-ipv4", "ami-id", "availability-zone", "vpc-id", "subnet-id", "region"] + items: { + type: { + string: { + examples: ["instance-id", "local-hostname"] + syntax: "literal" + } + } + } + } + } + } + namespace: { + common: true + description: "Prepend a namespace to each field's key." + name: "namespace" + required: false + warnings: [] + type: { + string: { + default: "" + examples: ["", "ec2", "aws.ec2"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + refresh_interval_secs: { + common: true + description: "The interval in seconds at which the EC2 Metadata API will be called." + name: "refresh_interval_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: null + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_ec2_metadata: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + output: { + logs: { + log: { + description: "Log event enriched with EC2 metadata" + name: "log" + fields: { + "ami-id": { + description: "The `ami-id` that the current EC2 instance is using." + name: "ami-id" + required: true + warnings: [] + type: { + string: { + examples: ["ami-00068cd7555f543d5"] + syntax: "literal" + } + } + } + "availability-zone": { + description: "The `availability-zone` that the current EC2 instance is running in."
+ name: "availability-zone" + required: true + warnings: [] + type: { + string: { + examples: ["54.234.246.107"] + syntax: "literal" + } + } + } + "instance-id": { + description: "The `instance-id` of the current EC2 instance." + name: "instance-id" + required: true + warnings: [] + type: { + string: { + examples: ["i-096fba6d03d36d262"] + syntax: "literal" + } + } + } + "local-hostname": { + description: "The `local-hostname` of the current EC2 instance." + name: "local-hostname" + required: true + warnings: [] + type: { + string: { + examples: ["ip-172-31-93-227.ec2.internal"] + syntax: "literal" + } + } + } + "local-ipv4": { + description: "The `local-ipv4` of the current EC2 instance." + name: "local-ipv4" + required: true + warnings: [] + type: { + string: { + examples: ["172.31.93.227"] + syntax: "literal" + } + } + } + "public-hostname": { + description: "The `public-hostname` of the current EC2 instance." + name: "public-hostname" + required: true + warnings: [] + type: { + string: { + examples: ["ec2-54-234-246-107.compute-1.amazonaws.com"] + syntax: "literal" + } + } + } + "public-ipv4": { + description: "The `public-ipv4` of the current EC2 instance." + name: "public-ipv4" + required: true + warnings: [] + type: { + string: { + examples: ["54.234.246.107"] + syntax: "literal" + } + } + } + region: { + description: "The `region` that the current EC2 instance is running in." + name: "region" + required: true + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + "role-name": { + description: "The `role-name` that the current EC2 instance is using." + name: "role-name" + required: true + warnings: [] + type: { + string: { + examples: ["some_iam_role"] + syntax: "literal" + } + } + } + "subnet-id": { + description: "The `subnet-id` of the current EC2 instance's default network interface." + name: "subnet-id" + required: true + warnings: [] + type: { + string: { + examples: ["subnet-9d6713b9"] + syntax: "literal" + } + } + } + "vpc-id": { + description: "The `vpc-id` of the current EC2 instance's default network interface." + name: "vpc-id" + required: true + warnings: [] + type: { + string: { + examples: ["vpc-a51da4dc"] + syntax: "literal" + } + } + } + } + } + } + } + env_vars: {} + type: "aws_ec2_metadata" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + fields: null + namespace: null + refresh_interval_secs: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + telemetry: { + metrics: { + metadata_refresh_failed_total: { + description: "The total number of failed efforts to refresh AWS EC2 metadata." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "metadata_refresh_failed_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + metadata_refresh_successful_total: { + description: "The total number of AWS EC2 metadata refreshes." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "metadata_refresh_successful_total" + } + events_out_total: { + description: "The total number of events emitted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + sample: { + kind: "transform" + output: {} + title: "Sample" + description: "Samples events at a configurable rate." + classes: { + commonly_used: false + development: "beta" + egress_method: "stream" + stateful: false + } + features: { + filter: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + key_field: { + common: false + description: """ + The name of the log field whose value will be hashed to determine if the event should be passed. + + Consistently samples the same events. Actual rate of sampling may differ from the configured one if + values in the field are not uniformly distributed. If left unspecified, or if the event doesn't have + `key_field`, events will be count rated. + """ + name: "key_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["message"] + syntax: "literal" + } + } + } + exclude: { + common: true + description: "The set of logical conditions to exclude events from sampling." 
+ name: "exclude" + required: false + warnings: [] + type: { + string: { + default: null + examples: [#".status_code != 200 && !includes(["info", "debug"], .severity)"#] + syntax: "remap_boolean_expression" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + rate: { + description: """ + The rate at which events will be forwarded, expressed as 1/N. For example, + `rate = 10` means 1 out of every 10 events will be forwarded and the rest will be dropped. + """ + name: "rate" + required: true + warnings: [] + type: { + uint: { + examples: [10] + unit: null + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + sample: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "sample" + #ExampleConfig: { + title: string + configuration: { + key_field: null + exclude: null + rate: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + telemetry: { + metrics: { + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "events_discarded_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + dedupe: { + kind: "transform" + output: {} + title: "Dedupe events" + description: "Deduplicates events to reduce data volume by eliminating copies of data." 
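+
+// The `dedupe` options documented below map onto Vector's TOML configuration. A
+// minimal sketch using the documented defaults for `fields.match` and
+// `cache.num_events`; the transform ID `dedupe_logs` and the input
+// `my_source_id` are hypothetical:
+//
+//     [transforms.dedupe_logs]
+//     type = "dedupe"
+//     inputs = ["my_source_id"]
+//     fields.match = ["timestamp", "host", "message"]
+//     cache.num_events = 5000
+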
+ classes: { + commonly_used: false + development: "stable" + egress_method: "stream" + stateful: true + } + features: { + filter: {} + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + cache: { + common: false + category: "Cache" + description: "Options controlling how we cache recent Events for future duplicate checking." + name: "cache" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + num_events: { + common: true + description: "The number of recent Events to cache and compare new incoming Events against." + name: "num_events" + required: false + warnings: [] + type: { + uint: { + default: 5000 + unit: null + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + fields: { + category: "Fields" + description: "Options controlling what fields to match against." + name: "fields" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + ignore: { + common: false + description: "The field names to ignore when deciding if an Event is a duplicate. Incompatible with the `fields.match` option." + name: "ignore" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["field1", "parent.child_field"] + syntax: "literal" + } + } + } + } + } + } + match: { + common: true + description: "The field names considered when deciding if an Event is a duplicate. This can also be globally set via the [global `log_schema` options][docs.reference.configuration.global-options#log_schema]. Incompatible with the `fields.ignore` option." + name: "match" + required: false + warnings: [] + type: { + array: { + default: ["timestamp", "host", "message"] + items: { + type: { + string: { + examples: ["field1", "parent.child_field", "host", "message"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + dedupe: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "dedupe" + #ExampleConfig: { + title: string + configuration: { + cache: null + fields: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + how_it_works: { + cache_bahavior: { + #Subsection: { + title: string + body: string + } + name: "cache_bahavior" + title: "Cache Behavior" + body: """ + This transform is backed by an LRU cache of size `cache.num_events`. 
+ That means that this transform will cache information in memory for + the last `cache.num_events` Events that it has processed. Entries + will be removed from the cache in the order they were inserted. If + an Event is received that is considered a duplicate of an Event + already in the cache, the cached Event is moved back to the head of + the cache and its place in line is reset, making it once again the + last entry in line to be evicted. + """ + } + memory_usage_details: { + #Subsection: { + title: string + body: string + } + name: "memory_usage_details" + title: "Memory Usage Details" + body: """ + Each entry in the cache corresponds to an incoming Event and + contains a copy of the 'value' data for all fields in the Event + being considered for matching. When using `fields.match`, this will + be the list of fields specified in that configuration option. When + using `fields.ignore`, this will include all fields present in the + incoming event except those specified in `fields.ignore`. Each entry + also uses a single byte per field to store the type information of + that field. When using `fields.ignore` each cache entry additionally + stores a copy of each field name being considered for matching. When + using `fields.match` storing the field names is not necessary. + """ + } + memory_utilization_estimation: { + #Subsection: { + title: string + body: string + } + name: "memory_utilization_estimation" + title: "Memory Utilization Estimation" + body: """ + If you want to estimate the memory requirements of this transform + for your dataset, you can do so with these formulas: + + When using `fields.match`: + + ```text + Sum(the average size of the *data* (but not including the field name) for each field in `fields.match`) * `cache.num_events` + ``` + + When using `fields.ignore`: + + ```text + (Sum(the average size of each incoming Event) - (the average size of the field name *and* value for each field in `fields.ignore`)) * `cache.num_events` + ``` + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). + State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + missing_fields: { + #Subsection: { + title: string + body: string + } + name: "missing_fields" + title: "Missing Fields" + body: """ + Fields with explicit null values will always be considered different + than if that field was omitted entirely. For example, if you run + this transform with `fields.match = ["a"]`, the event "{a: null, + b:5}" will be considered different from the event "{b:5}". + """ + } + } + telemetry: { + metrics: { + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "events_discarded_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port."
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + logfmt_parser: { + kind: "transform" + output: {} + title: "Logfmt Parser" + description: "Parses a log field's value in the [logfmt](https://brandur.org/logfmt) format." + classes: { + commonly_used: false + development: "deprecated" + egress_method: "stream" + stateful: false + } + features: { + parse: { + format: { + name: "Logfmt" + url: "https://brandur.org/logfmt" + versions: null + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + This transform has been deprecated in favor of the [`remap`](https://vector.dev/docs/reference/transforms/remap/) + transform, which enables you to use [Vector Remap Language](https://vector.dev/docs/reference/vrl/) (VRL for short) to + create transform logic of any degree of complexity. The examples below show how you can use VRL to + replace this transform's functionality. + + ```vrl + .message = parse_key_value(.message) + ``` + """] + notices: [] + } + configuration: { + drop_field: { + common: true + description: "If the specified `field` should be dropped (removed) after parsing." + name: "drop_field" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + field: { + common: true + description: "The log field to parse." + name: "field" + required: false + warnings: [] + type: { + string: { + default: "message" + examples: ["message", "parent.child", "array[0]"] + syntax: "literal" + } + } + } + timezone: { + common: false + description: "The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. This overrides the global [`timezone` option][docs.reference.configuration.global-options#timezone]. The time zone name may be any name in the [TZ database][urls.tz_time_zones], or `local` to indicate system local time." + name: "timezone" + required: false + warnings: [] + type: { + string: { + default: "local" + examples: ["local", "America/NewYork", "EST5EDT"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + types: { + common: true + category: "Types" + description: """ + Key/value pairs representing mapped log field names and types. This is used to + coerce log fields from strings into their proper types. The available types are + listed in the **Types** list below. 
+ + Timestamp coercions need to be prefaced with `timestamp|`, for example + `"timestamp|%F"`. Timestamp specifiers can use either of the following: + + 1. One of the built-in-formats listed in the **Timestamp Formats** table below. + 2. The [time format specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) from Rust's + `chrono` library. + + ### Types + + * `array` + * `bool` + * `bytes` + * `float` + * `int` + * `map` + * `null` + * `timestamp` (see the table below for formats) + + ### Timestamp Formats + + Format | Description | Example + :------|:------------|:------- + `%F %T` | `YYYY-MM-DD HH:MM:SS` | `2020-12-01 02:37:54` + `%v %T` | `DD-Mmm-YYYY HH:MM:SS` | `01-Dec-2020 02:37:54` + `%FT%T` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)\\[RFC 3339](https://tools.ietf.org/html/rfc3339) format without time zone | `2020-12-01T02:37:54` + `%a, %d %b %Y %T` | [RFC 822](https://tools.ietf.org/html/rfc822#section-5)/[2822](https://tools.ietf.org/html/rfc2822#section-3.3) without time zone | `Tue, 01 Dec 2020 02:37:54` + `%a %d %b %T %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output without time zone | `Tue 01 Dec 02:37:54 2020` + `%a %b %e %T %Y` | [ctime](https://www.cplusplus.com/reference/ctime) format | `Tue Dec 1 02:37:54 2020` + `%s` | [UNIX](https://en.wikipedia.org/wiki/Unix_time) timestamp | `1606790274` + `%FT%TZ` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC | `2020-12-01T09:37:54Z` + `%+` | [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601)/[RFC 3339](https://tools.ietf.org/html/rfc3339) UTC with time zone | `2020-12-01T02:37:54-07:00` + `%a %d %b %T %Z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with time zone | `Tue 01 Dec 02:37:54 PST 2020` + `%a %d %b %T %z %Y`| [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone | `Tue 01 Dec 02:37:54 -0700 2020` + `%a %d %b %T %#z %Y` | [`date`](https://man7.org/linux/man-pages/man1/date.1.html) command output with numeric time zone (minutes can be missing or present) | `Tue 01 Dec 02:37:54 -07 2020` + + **Note**: the examples in this table are for 54 seconds after 2:37 am on December 1st, 2020 in Pacific Standard Time. + """ + name: "types" + required: false + warnings: [] + type: { + object: { + examples: [{ + status: "int" + duration: "float" + success: "bool" + timestamp_iso8601: "timestamp|%F" + timestamp_custom: "timestamp|%a %b %e %T %Y" + timestamp_unix: "timestamp|%F %T" + parent: { + child: "int" + } + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + logfmt_parser: "The type of this component." 
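+        // Taken together, the options above amount to a configuration like the
+        // following minimal sketch (TOML; the component ID and input are
+        // hypothetical, and the `types` entries reuse the examples shown above):
+        //
+        //     [transforms.parse_logfmt]
+        //     type = "logfmt_parser"
+        //     inputs = ["my-source-or-transform-id"]
+        //     field = "message"  # default
+        //     drop_field = true  # default
+        //     types.status = "int"
+        //     types.duration = "float"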
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "logfmt_parser" + #ExampleConfig: { + title: string + configuration: { + drop_field: null + field: null + timezone: null + types: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: {} | {} | [{} | {}] | null + } + examples: [{ + title: "Heroku Router Log" + configuration: { + field: "message" + drop_field: true + timezone: null + type: null + types: { + bytes: "int" + status: "int" + } + inputs: null + } + input: { + log: { + message: "at=info method=GET path=/ host=myapp.herokuapp.com request_id=8601b555-6a83-4c12-8269-97c8e32cdb22 fwd=\"204.204.204.204\" dyno=web.1 connect=1ms service=18ms status=200 bytes=13 tls_version=tls1.1 protocol=http" + } + } + output: { + log: { + at: "info" + method: "GET" + path: "/" + host: "myapp.herokuapp.com" + request_id: "8601b555-6a83-4c12-8269-97c8e32cdb22" + fwd: "204.204.204.204" + dyno: "web.1" + connect: "1ms" + service: "18ms" + status: 200 + bytes: 13 + tls_version: "tls1.1" + protocol: "http" + } + } + }, { + title: "Loosely Structured" + configuration: { + field: "message" + drop_field: false + timezone: null + type: null + types: { + status: "int" + } + inputs: null + } + input: { + log: { + message: "info | Sent 200 in 54.2ms duration=54.2ms status=200" + } + } + output: { + log: { + message: "info | Sent 200 in 54.2ms duration=54.2ms status=200" + duration: "54.2ms" + status: 200 + } + } + }] + how_it_works: { + key_value_parsing: { + #Subsection: { + title: string + body: string + } + name: "key_value_parsing" + title: "Key/Value Parsing" + body: """ + This transform can be used for key/value parsing. [Logfmt](https://brandur.org/logfmt) refers + to a _loosely_ defined spec that parses a key/value pair delimited by a `=` + character. This section, and it's keywords, is primarily added to assist users + in finding this transform for these terms. + """ + } + quoting_values: { + #Subsection: { + title: string + body: string + } + name: "quoting_values" + title: "Quoting Values" + body: """ + Values can be quoted to capture spaces, and quotes can be escaped with `\\`. + For example + + ```text + key1="value with spaces" key2="value with spaces and \\"" + ``` + + Would result in the following `log` event: + + ```json title="log event" + { + "key1": "value with spaces", + "key2": "value with spaces and \\"" + } + ``` + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + format_specification: { + #Subsection: { + title: string + body: string + } + name: "format_specification" + title: "Format Specification" + body: """ + [Logfmt](https://brandur.org/logfmt) is, unfortunately, a very loosely defined format. There + is no official specification for the format and Vector makes a best effort to + parse key/value pairs delimited with a `=`. It works by splitting the `field`'s + value on non-quoted white-space and then splitting each token by a non-quoted + `=` character. This makes the parsing process somewhat flexible in that the + string does not need to be strictly formatted. 
+ + For example, the following log line: + + ```js title="log event" + { + "message": "Hello world duration=2s user-agent="Firefox/47.3 Mozilla/5.0"" + } + ``` + + Will be successfully parsed into: + + ```js title="log event" + { + "message": "Hello world duration=2s user-agent="Firefox/47.3 Mozilla/5.0"", + "duration": "2s", + "user-agent": "Firefox/47.3 Mozilla/5.0" + } + ``` + """ + } + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + } + } + } + } + sinks: { + vector: { + kind: "sink" + title: "Vector" + description: "Sends data to another downstream Vector instance via the Vector source." 
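+        // A minimal configuration sketch for this sink (TOML; the component IDs
+        // are hypothetical, and `address` reuses the example value documented
+        // below):
+        //
+        //     [sinks.to_downstream_vector]
+        //     type = "vector"
+        //     inputs = ["my-source-or-transform-id"]
+        //     address = "92.12.333.224:5000"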
+ classes: { + commonly_used: false + delivery: "best_effort" + development: "beta" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + send_buffer_bytes: { + enabled: true + } + keepalive: { + enabled: true + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Vector" + thing: "a Vector instance" + url: "https://vector.dev/docs/" + versions: ">= 0.11.0" + connect_to: { + splunk: { + logs: { + setup: [{ + title: "Create a Splunk HEC endpoint" + description: "Follow the Splunk HEC setup docs to create a Splunk HEC endpoint." + detour: { + url: "https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector" + } + }, { + title: "Configure Vector" + description: """ + Splunk will provide you with a host and token. Copy those + values to the `host` and `token` options. + """ + vector: { + configure: { + sinks: { + splunk_hec: { + type: "splunk_hec" + host: "" + token: "" + } + } + } + } + }] + } + } + } + } + interface: { + socket: { + direction: "outgoing" + protocols: ["tcp"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + summary: true + set: true + } + } + configuration: { + address: { + description: "The downstream Vector address to connect to. The address _must_ include a port." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["92.12.333.224:5000"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." 
+ name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the sink." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." 
+ name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + vector: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "vector" + #ExampleConfig: { + title: string + configuration: { + address: null + type: null + inputs: null + buffer: null + encoding: null + healthcheck: null + keepalive: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + encoding: { + #Subsection: { + title: string + body: string + } + name: "encoding" + title: "Encoding" + body: """ + Data is encoded via Vector's [event protobuf](https://github.com/timberio/vector/blob/master/proto/event.proto) + before it is sent over the wire. 
+ """ + } + communication_protocol: { + #Subsection: { + title: string + body: string + } + name: "communication_protocol" + title: "Communication Protocol" + body: """ + Upstream Vector instances forward data to downstream Vector + instances via the TCP protocol. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + tls: { + #Subsection: { + title: string + body: string + } + name: "tls" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols. You can + adjust TLS behavior via the `tls.*` options. + """ + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + message_acknowledgement: { + #Subsection: { + title: string + body: string + } + name: "message_acknowledgement" + title: "Message Acknowledgement" + body: """ + Currently, Vector does not perform any application level message + acknowledgement. While rare, this means the individual message + could be lost. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `vector` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + protobuf_decode_errors_total: { + description: "The total number of [Protocol Buffers](https://developers.google.com/protocol-buffers) errors thrown during communication between Vector instances." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "protobuf_decode_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + azure_monitor_logs: { + kind: "sink" + title: "Azure Monitor Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["Azure"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 30000000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: true + } + to: { + service: { + name: "Azure Monitor logs" + thing: "a Azure Monitor logs account" + url: "https://azure.microsoft.com/en-us/services/monitor/" + versions: null + description: "[Azure Monitor](https://azure.microsoft.com/en-us/services/monitor/) is a service in Azure that provides performance and availability monitoring for applications and services in Azure, other cloud environments, or on-premises. Azure Monitor collects data from multiple sources into a common data platform where it can be analyzed for trends and anomalies." + } + interface: { + socket: { + api: { + title: "Azure Monitor logs API" + url: "https://docs.microsoft.com/en-us/rest/api/monitor/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + azure_resource_id: { + common: true + description: "[Resource ID](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/data-collector-api#request-headers) of the Azure resource the data should be associated with." 
+ name: "azure_resource_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/subscriptions/11111111-1111-1111-1111-111111111111/resourceGroups/otherResourceGroup/providers/Microsoft.Storage/storageAccounts/examplestorage", "/subscriptions/11111111-1111-1111-1111-111111111111/resourceGroups/examplegroup/providers/Microsoft.SQL/servers/serverName/databases/databaseName"] + syntax: "literal" + } + } + } + customer_id: { + description: "The [unique identifier](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/data-collector-api#request-uri-parameters) for the Log Analytics workspace." + name: "customer_id" + required: true + warnings: [] + type: { + string: { + examples: ["5ce893d9-2c32-4b6c-91a9-b0887c2de2d6", "97ce69d9-b4be-4241-8dbd-d265edcf06c4"] + syntax: "literal" + } + } + } + host: { + common: true + description: "[Alternative host](https://docs.azure.cn/en-us/articles/guidance/developerdifferences#check-endpoints-in-azure) for dedicated Azure regions." + name: "host" + required: false + warnings: [] + type: { + string: { + default: "ods.opinsights.azure.com" + examples: ["ods.opinsights.azure.us", "ods.opinsights.azure.cn"] + syntax: "literal" + } + } + } + log_type: { + description: "The [record type of the data that is being submitted](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/data-collector-api#request-headers). Can only contain letters, numbers, and underscore (_), and may not exceed 100 characters." + name: "log_type" + required: true + warnings: [] + type: { + string: { + examples: ["MyTableName", "MyRecordType"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." 
+ name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 30000000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." 
+ name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + shared_key: { + description: "The [primary or the secondary key](https://docs.microsoft.com/en-us/azure/azure-monitor/platform/data-collector-api#authorization) for the Log Analytics workspace." + name: "shared_key" + required: true + warnings: [] + type: { + string: { + examples: ["${AZURE_MONITOR_SHARED_KEY_ENV_VAR}", "SERsIYhgMVlJB6uPsq49gCxNiruf6v0vhMYE+lfzbSGcXjdViZdV/e5pEMTYtw9f8SkVLf4LFlLCc2KxtRZfCA=="] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + azure_monitor_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "azure_monitor_logs" + #ExampleConfig: { + title: string + configuration: { + azure_resource_id: null + customer_id: null + host: null + log_type: null + shared_key: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + gcp_pubsub: { + kind: "sink" + title: "GCP PubSub" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["GCP"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 10485760 + max_events: 1000 + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 100 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "GCP PubSub" + thing: "a GCP PubSub pipeline" + url: "https://cloud.google.com/pubsub/" + versions: null + description: "[GCP Pub/Sub][urls.gcp_pubsub] is a fully-managed real-time messaging service that allows you to send and receive messages between independent applications on the Google Cloud Platform." + } + interface: { + socket: { + api: { + title: "GCP XML Interface" + url: "https://cloud.google.com/storage/docs/xml-api/overview" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + api_key: { + common: false + description: "A [Google Cloud API key][urls.gcp_authentication_api_key] used to authenticate access the pubsub project and topic. Either this or `credentials_path` must be set." + name: "api_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${GCP_API_KEY}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + credentials_path: { + common: true + description: """ + The filename for a Google Cloud service account credentials JSON file used to authenticate access to the pubsub project and topic. If this is unset, Vector checks the `GOOGLE_APPLICATION_CREDENTIALS` environment variable for a filename. + + If no filename is named, Vector will attempt to fetch an instance service account for the compute instance the program is running on. If Vector is not running on a GCE instance, you must define a credentials file as above. 
+ """ + name: "credentials_path" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + } + endpoint: { + common: false + description: "The endpoint to which to send data." + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "https://pubsub.googleapis.com" + examples: ["https://us-central1-pubsub.googleapis.com"] + syntax: "literal" + } + } + } + project: { + description: "The project name to which to publish logs." + name: "project" + required: true + warnings: [] + type: { + string: { + examples: ["vector-123456"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 10485760 + unit: "bytes" + } + } + } + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." 
+ name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." 
+ name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 100 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." 
+ name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + topic: { + description: "The topic within the project to which to publish logs." + name: "topic" + required: true + warnings: [] + type: { + string: { + examples: ["this-is-a-topic"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + gcp_pubsub: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: { + GOOGLE_APPLICATION_CREDENTIALS: { + description: "The filename for a Google Cloud service account credentials JSON file used for authentication." 
+ name: "GOOGLE_APPLICATION_CREDENTIALS" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "gcp_pubsub" + #ExampleConfig: { + title: string + configuration: { + api_key: null + credentials_path: null + endpoint: null + project: null + topic: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "\(_service).\(_action)" + } + platform: "gcp" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "pubsub.topics.get" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "pubsub.topics.publish" + }] + }] + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + gcp_authentication: { + #Subsection: { + title: string + body: string + } + name: "gcp_authentication" + title: "GCP Authentication" + body: """ + GCP offers a [variety of authentication methods](https://cloud.google.com/docs/authentication/) and + Vector is concerned with the [server to server methods](https://cloud.google.com/docs/authentication/production) + and will find credentials in the following order: + + 1. If the [`credentials_path`](#credentials_path) option is set. + 1. If the `api_key` option is set. + 1. If the [`GOOGLE_APPLICATION_CREDENTIALS`](#google_application_credentials) envrionment variable is set. + 1. Finally, Vector will check for an [instance service account](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually). + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. 
For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adaptive concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Request Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/). + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + This component buffers & batches data. Note that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured `max_bytes` or `max_events`. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_initial_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + console: { + kind: "sink" + title: "Console" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: false + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: false + } + tls: { + enabled: false + } + to: { + service: { + name: "STDOUT" + thing: "a STDOUT stream" + url: "https://en.wikipedia.org/wiki/Standard_streams#Standard_output_(stdout)" + versions: null + } + interface: { + stdout: {} + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + target: { + common: true + description: "The [standard stream](https://en.wikipedia.org/wiki/Standard_streams) to write to." 
+ name: "target" + required: false + warnings: [] + type: { + string: { + default: "stdout" + enum: { + stdout: "Output will be written to [STDOUT](https://en.wikipedia.org/wiki/Standard_streams#Standard_output_(stdout))" + stderr: "Output will be written to [STDERR](https://en.wikipedia.org/wiki/Standard_streams#Standard_error_(stderr))" + } + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + console: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "console" + #ExampleConfig: { + title: string + configuration: { + target: null + type: null + inputs: null + encoding: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error." + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error." + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operation failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component."
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + influxdb_metrics: { + kind: "sink" + title: "InfluxDB Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["InfluxData"] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: null + max_events: 20 + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "InfluxDB" + thing: "an InfluxDB database" + url: "https://www.influxdata.com/products/influxdb-overview/" + versions: null + description: "[InfluxDB](https://www.influxdata.com/products/influxdb-overview/) is an open-source time series database developed by InfluxData. It is written in Go and optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet of Things sensor data, and real-time analytics." + } + interface: { + socket: { + api: { + title: "Influx HTTP API" + url: "https://v2.docs.influxdata.com/v2.0/api/#tag/Write" + } + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + bucket: { + description: "The destination bucket for writes into InfluxDB 2." 
+ groups: ["v2"] + name: "bucket" + required: true + warnings: [] + type: { + string: { + examples: ["vector-bucket", "4d2225e4d3d49f75"] + syntax: "literal" + } + } + } + consistency: { + category: "Persistence" + common: true + description: "Sets the write consistency for the point for InfluxDB 1." + groups: ["v1"] + name: "consistency" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["any", "one", "quorum", "all"] + syntax: "literal" + } + } + } + database: { + description: "Sets the target database for the write into InfluxDB 1." + groups: ["v1"] + name: "database" + required: true + warnings: [] + type: { + string: { + examples: ["vector-database", "iot-store"] + syntax: "literal" + } + } + } + endpoint: { + description: "The endpoint to send data to." + groups: ["v1", "v2"] + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["http://localhost:8086/", "https://us-west-2-1.aws.cloud1.influxdata.com", "https://us-west-2-1.aws.cloud2.influxdata.com"] + syntax: "literal" + } + } + } + org: { + category: "Auth" + description: "Specifies the destination organization for writes into InfluxDB 2." + groups: ["v2"] + name: "org" + required: true + warnings: [] + type: { + string: { + examples: ["my-org", "33f2cff0a28e5b63"] + syntax: "literal" + } + } + } + password: { + category: "Auth" + common: true + description: "Sets the password for authentication if you’ve enabled authentication for the write into InfluxDB 1." + groups: ["v1"] + name: "password" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${INFLUXDB_PASSWORD}", "influxdb4ever"] + syntax: "literal" + } + } + } + retention_policy_name: { + category: "Persistence" + common: true + description: "Sets the target retention policy for the write into InfluxDB 1." + groups: ["v1"] + name: "retention_policy_name" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["autogen", "one_day_only"] + syntax: "literal" + } + } + } + tags: { + common: false + description: "A set of additional fields that will be attached to each LineProtocol as a tag. Note: If the set of tag values has high cardinality this also increase cardinality in InfluxDB." + groups: ["v1", "v2"] + name: "tags" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["field1", "parent.child_field"] + syntax: "field_path" + } + } + } + } + } + } + token: { + category: "Auth" + description: "[Authentication token](https://v2.docs.influxdata.com/v2.0/security/tokens/) for InfluxDB 2." + groups: ["v2"] + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${INFLUXDB_TOKEN}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + default_namespace: { + common: true + description: """ + Used as a namespace for metrics that don't have it. + A namespace will be prefixed to a metric's name. + """ + name: "default_namespace" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["service"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 20 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." 
+ name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." 
+ name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + username: { + category: "Auth" + common: true + description: "Sets the username for authentication if you’ve enabled authentication for the write into InfluxDB 1." + groups: ["v1"] + name: "username" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["todd", "vector-source"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + influxdb_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "influxdb_metrics" + #ExampleConfig: { + title: string + configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + default_namespace: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + examples: [{ + title: "Counter" + configuration: { + default_namespace: "service" + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "incremental" + name: "logins" + counter: { + value: 1.5 + } + tags: { + host: "my-host.local" + } + } + } + output: "service.logins,metric_type=counter,host=my-host.local value=1.5 1542182950000000011" + }, { + title: "Distribution" + notes: "For distributions with histogram, summary is computed." 
+ configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + default_namespace: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "incremental" + name: "sparse_stats" + namespace: "app" + distribution: { + samples: [{ + value: 1.0 + rate: 1 + }, { + value: 5.0 + rate: 2 + }, { + value: 3.0 + rate: 3 + }] + statistic: "histogram" + } + tags: { + host: "my-host.local" + } + } + } + output: "app.sparse_stats,metric_type=distribution,host=my-host.local avg=3.333333,count=6,max=5,median=3,min=1,quantile_0.95=5,sum=20 1542182950000000011" + }, { + title: "Gauge" + configuration: { + default_namespace: "service" + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "absolute" + name: "memory_rss" + namespace: "app" + gauge: { + value: 1.5 + } + tags: { + host: "my-host.local" + } + } + } + output: "app.memory_rss,metric_type=gauge,host=my-host.local value=1.5 1542182950000000011" + }, { + title: "Histogram" + configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + default_namespace: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "absolute" + name: "requests" + histogram: { + buckets: [{ + upper_limit: 1.0 + count: 2 + }, { + upper_limit: 2.1 + count: 5 + }, { + upper_limit: 3.0 + count: 10 + }] + count: 17 + sum: 46.2 + } + tags: { + host: "my-host.local" + } + } + } + output: "requests,metric_type=histogram,host=my-host.local bucket_1=2i,bucket_2.1=5i,bucket_3=10i,count=17i,sum=46.2 1542182950000000011" + }, { + title: "Set" + configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + default_namespace: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "incremental" + name: "users" + set: { + values: ["first", "another", "last"] + } + tags: { + host: "my-host.local" + } + } + } + output: "users,metric_type=set,host=my-host.local value=3 154218295000000001" + }, { + title: "Summary" + configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + default_namespace: null + type: null + inputs: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: { + metric: { + kind: "absolute" + name: "requests" + summary: { + quantiles: [{ + upper_limit: 0.01 + value: 1.5 + }, { + upper_limit: 0.5 + value: 2.0 + }, { + upper_limit: 0.99 + value: 3.0 + }] + count: 6 + sum: 12.1 + } + tags: { + host: "my-host.local" + } + } + } + output: "requests,metric_type=summary,host=my-host.local count=6i,quantile_0.01=1.5,quantile_0.5=2,quantile_0.99=3,sum=12.1 1542182950000000011" + }] + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: 
"state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + aws_sqs: { + kind: "sink" + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + queue_url: { + description: "The URL of the Amazon SQS queue to which messages are sent." 
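+// A sketch of the `aws_sqs` auth options above (hypothetical sink ID); the key
+// values are the documented AWS-style examples, with `assume_role` shown as
+// the documented alternative:
+//
+//   [sinks.my_sqs_sink.auth]
+//   access_key_id = "AKIAIOSFODNN7EXAMPLE"
+//   secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+//   # or, instead of static keys:
+//   # assume_role = "arn:aws:iam::123456789098:role/my_role"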
+ name: "queue_url" + required: true + warnings: [] + type: { + string: { + examples: ["https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." 
+ name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + message_group_id: { + common: false + description: "The tag that specifies that a message belongs to a specific message group. Can be applied only to FIFO queues." + name: "message_group_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["vector", "vector-%Y-%m-%d"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_sqs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. 
+ If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["us-east-1"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "aws_sqs" + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file (usually located at `~/.aws/config`). + 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) (usually located at `~/.aws/credentials`). + 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) (this only works when running on an EC2 instance with an instance profile/role). + + If credentials are not found, the [healthcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs].
+ """ + sub_sections: [{ + title: "Obtaining an access key" + body: """ + In general, we recommend using instance profiles/roles whenever possible. In + cases where this is not possible you can generate an AWS access key for any user + within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on + how to do this. Such created AWS access keys can be used via [`access_key_id`](#access_key_id) + and [`secret_access_key`](#secret_access_key) options. + """ + }, { + title: "Assuming roles" + body: """ + Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an + optional setting that is helpful for a variety of use cases, such as cross + account access. + """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. 
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_initial_backoff_secs` options. + """ + } + } + title: "Amazon Simple Queue Service (SQS)" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "stream" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + queue_url: null + message_group_id: null + type: null + inputs: null + buffer: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS Simple Queue Service" + thing: "an AWS Simple Queue Service queue" + url: "https://aws.amazon.com/sqs/" + versions: null + description: "[Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) is a fully managed message queuing service that enables you to decouple and scale microservices, distributed systems, and serverless applications." + } + interface: { + socket: { + api: { + title: "Amazon Simple Queue Service API" + url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/Welcome.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)."
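+ // Pulling the required options together, a minimal aws_sqs sink sketch in TOML
+ // (the sink ID, queue URL, and region below are illustrative):
+ //
+ //   [sinks.my_sqs_sink]
+ //   type = "aws_sqs"
+ //   inputs = ["my-source-or-transform-id"]
+ //   queue_url = "https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue"
+ //   region = "us-east-2"
+ //   encoding.codec = "json"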
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html" + action: "sqs:GetQueueAttributes" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html" + action: "sqs:SendMessage" + }] + }] + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_in_total: { + description: "The total number of events accepted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + missing_keys_total: { + description: "The total number of failed template renders due to missed keys from the event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "missing_keys_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + socket: { + kind: "sink" + title: "Socket" + classes: { + commonly_used: true + delivery: "best_effort" + development: "stable" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + send_buffer_bytes: { + enabled: true + relevant_when: "mode = `tcp` or mode = `udp` && os = `unix`" + } + keepalive: { + enabled: true + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "socket receiver" + thing: "a socket receiver" + url: "https://en.wikipedia.org/wiki/Network_socket" + versions: null + } + interface: { + socket: { + direction: "outgoing" + protocols: ["tcp", "udp", "unix"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." 
+ compress: "Compresses data to optimize bandwidth." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + address: { + description: "The address to connect to. The address _must_ include a port." + name: "address" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["92.12.333.224:5000"] + syntax: "literal" + } + } + } + mode: { + description: "The type of socket to use." + name: "mode" + required: true + warnings: [] + type: { + string: { + enum: { + tcp: "TCP socket" + udp: "UDP socket" + unix: "Unix domain socket" + } + examples: ["tcp", "udp", "unix"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." 
+ } + syntax: "literal" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Makes the sink encode only the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the sink." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for outgoing connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format.
+ If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + path: { + description: "The unix socket path. This should be an absolute path." + name: "path" + relevant_when: "mode = `unix`" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/socket"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + socket: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "socket" + #ExampleConfig: { + title: string + configuration: { + address: null + mode: null + path: null + type: null + inputs: null + buffer: null + encoding: null + healthcheck: null + keepalive: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + connection_errors_total: { + description: "The total number of connection errors for this Vector instance." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "connection_errors_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics."
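+ // The TLS options above combine along these lines; a minimal sketch, assuming a
+ // hypothetical sink ID `my_socket_sink` and illustrative file paths:
+ //
+ //   [sinks.my_socket_sink.tls]
+ //   enabled = true
+ //   ca_file = "/path/to/certificate_authority.crt"
+ //   crt_file = "/path/to/host_certificate.crt"
+ //   key_file = "/path/to/host_certificate.key"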
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + } + } + influxdb_logs: { + kind: "sink" + title: "InfluxDB Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["InfluxData"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "InfluxDB" + thing: "an InfluxDB database" + url: "https://www.influxdata.com/products/influxdb-overview/" + versions: null + description: "[InfluxDB](https://www.influxdata.com/products/influxdb-overview/) is an open-source time series database developed by InfluxData. It is written in Go and optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet of Things sensor data, and real-time analytics." + } + interface: { + socket: { + api: { + title: "Influx HTTP API" + url: "https://v2.docs.influxdata.com/v2.0/api/#tag/Write" + } + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput."
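+ // As noted in the health check sections above, the check can be disabled per
+ // sink via the documented `healthcheck.enabled` option. A minimal sketch,
+ // assuming a hypothetical sink ID `my_sink`:
+ //
+ //   [sinks.my_sink]
+ //   healthcheck.enabled = false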
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + bucket: { + description: "The destination bucket for writes into InfluxDB 2." + groups: ["v2"] + name: "bucket" + required: true + warnings: [] + type: { + string: { + examples: ["vector-bucket", "4d2225e4d3d49f75"] + syntax: "literal" + } + } + } + consistency: { + category: "Persistence" + common: true + description: "Sets the write consistency for the point for InfluxDB 1." + groups: ["v1"] + name: "consistency" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["any", "one", "quorum", "all"] + syntax: "literal" + } + } + } + database: { + description: "Sets the target database for the write into InfluxDB 1." + groups: ["v1"] + name: "database" + required: true + warnings: [] + type: { + string: { + examples: ["vector-database", "iot-store"] + syntax: "literal" + } + } + } + endpoint: { + description: "The endpoint to send data to." + groups: ["v1", "v2"] + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["http://localhost:8086/", "https://us-west-2-1.aws.cloud1.influxdata.com", "https://us-west-2-1.aws.cloud2.influxdata.com"] + syntax: "literal" + } + } + } + org: { + category: "Auth" + description: "Specifies the destination organization for writes into InfluxDB 2." + groups: ["v2"] + name: "org" + required: true + warnings: [] + type: { + string: { + examples: ["my-org", "33f2cff0a28e5b63"] + syntax: "literal" + } + } + } + password: { + category: "Auth" + common: true + description: "Sets the password for authentication if you’ve enabled authentication for the write into InfluxDB 1." + groups: ["v1"] + name: "password" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${INFLUXDB_PASSWORD}", "influxdb4ever"] + syntax: "literal" + } + } + } + retention_policy_name: { + category: "Persistence" + common: true + description: "Sets the target retention policy for the write into InfluxDB 1." + groups: ["v1"] + name: "retention_policy_name" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["autogen", "one_day_only"] + syntax: "literal" + } + } + } + tags: { + common: false + description: "A set of additional fields that will be attached to each LineProtocol as a tag. Note: If the set of tag values has high cardinality this also increases cardinality in InfluxDB." + groups: ["v1", "v2"] + name: "tags" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["field1", "parent.child_field"] + syntax: "field_path" + } + } + } + } + } + } + token: { + category: "Auth" + description: "[Authentication token](https://v2.docs.influxdata.com/v2.0/security/tokens/) for InfluxDB 2." + groups: ["v2"] + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${INFLUXDB_TOKEN}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + namespace: { + description: "A prefix that will be added to all log names."
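+ // A minimal influxdb_logs sketch in TOML for InfluxDB v2 (the sink ID and values
+ // are illustrative; for InfluxDB v1, set `database`, and optionally `username`
+ // and `password`, instead of `bucket`, `org`, and `token`):
+ //
+ //   [sinks.my_influxdb_sink]
+ //   type = "influxdb_logs"
+ //   inputs = ["my-source-or-transform-id"]
+ //   endpoint = "http://localhost:8086/"
+ //   bucket = "vector-bucket"
+ //   org = "my-org"
+ //   token = "${INFLUXDB_TOKEN}"
+ //   namespace = "service"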
+ groups: ["v1", "v2"] + name: "namespace" + required: true + warnings: [] + type: { + string: { + examples: ["service"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." 
+ name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + username: { + category: "Auth" + common: true + description: "Sets the username for authentication if you’ve enabled authentication for the write into InfluxDB 1." + groups: ["v1"] + name: "username" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["todd", "vector-source"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + influxdb_logs: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "influxdb_logs" + #ExampleConfig: { + title: string + configuration: { + bucket: null + consistency: null + database: null + endpoint: null + org: null + password: null + retention_policy_name: null + tags: null + token: null + username: null + namespace: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + mapping: { + #Subsection: { + title: string + body: string + } + name: "mapping" + title: "Mapping Log Fields" + body: """ + InfluxDB uses [line protocol](https://v2.docs.influxdata.com/v2.0/reference/syntax/line-protocol/) to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point. + + A `Log Event` event contains an arbitrary set of fields (key/value pairs) that describe the event. + + The following matrix outlines how Log Event fields are mapped into InfluxDB Line Protocol: + + | Field | Line Protocol | + |---------------|-------------------| + | host | tag | + | message | field | + | source_type | tag | + | timestamp | timestamp | + | [custom-key] | field | + + The default behavior can be overridden by a `tags` configuration. + """ + sub_sections: [{ + title: "Mapping Example" + body: """ + The following event: + + ```js + { + "host": "my.host.com", + "message": "<13>Feb 13 20:07:26 74794bfb6795 root[8539]: i am foobar", + "timestamp": "2019-11-01T21:15:47+00:00", + "custom_field": "custom_value" + } + ``` + + Will be mapped to Influx's line protocol: + + ```influxdb_line_protocol + ns.vector,host=my.host.com,metric_type=logs custom_field="custom_value",message="<13>Feb 13 20:07:26 74794bfb6795 root[8539]: i am foobar" 1572642947000000000 + ``` + """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data.
+ """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_in_total"
+ }
+ events_out_total: {
+ description: "The total number of events emitted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ }
+ prometheus_remote_write: {
+ kind: "sink"
+ title: "Prometheus Remote Write"
+ classes: {
+ commonly_used: true
+ delivery: "at_least_once"
+ development: "beta"
+ egress_method: "batch"
+ service_providers: []
+ stateful: false
+ }
+ features: {
+ buffer: {
+ enabled: false
+ }
+ healthcheck: {
+ enabled: true
+ }
+ send: {
+ batch: {
+ enabled: true
+ common: false
+ max_bytes: null
+ max_events: 1000
+ timeout_secs: 1
+ }
+ compression: {
+ enabled: false
+ }
+ encoding: {
+ enabled: false
+ }
+ request: {
+ enabled: true
+ adaptive_concurrency: true
+ concurrency: 5
+ rate_limit_duration_secs: 1
+ rate_limit_num: 5
+ retry_initial_backoff_secs: 1
+ retry_max_duration_secs: 10
+ timeout_secs: 60
+ headers: false
+ }
+ tls: {
+ enabled: true
+ can_enable: false
+ can_verify_certificate: true
+ can_verify_hostname: true
+ enabled_default: false
+ }
+ to: {
+ service: {
+ name: "Prometheus"
+ thing: "a Prometheus database"
+ url: "https://prometheus.io/"
+ versions: null
+ description: "[Prometheus](https://prometheus.io/) is a pull-based monitoring system that scrapes metrics from configured endpoints, stores them efficiently, and supports a powerful query language to compose dynamic information from a variety of otherwise unrelated data points."
+ }
+ interface: {
+ socket: {
+ api: {
+ title: "Prometheus remote_write protocol"
+ url: "https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write"
+ }
+ direction: "outgoing"
+ protocols: ["http"]
+ ssl: "optional"
+ }
+ }
+ }
+ }
+ descriptions: {
+ compress: "Compresses data to optimize bandwidth."
+ request: "Automatically retries failed requests, with backoff."
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)."
+ batch: "Batches data to maximize throughput."
+ }
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: ["""
+ High cardinality metric names and labels are discouraged by
+ Prometheus as they can cause performance and reliability
+ problems. You should consider alternative strategies to reduce
+ the cardinality.
Vector offers a [`tag_cardinality_limit` transform][docs.transforms.tag_cardinality_limit] + as a way to protect against this. + """] + notices: [] + } + configuration: { + endpoint: { + description: "The endpoint URL to send data to." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["https://localhost:8087/"] + syntax: "literal" + } + } + } + auth: { + common: false + category: "Auth" + description: "Configures the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." + name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + bearer: "The bearer token authentication strategy." + } + examples: ["basic", "bearer"] + syntax: "literal" + } + } + } + token: { + description: "The token to use for bearer authentication" + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${API_TOKEN}", "xyz123"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." + name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + default_namespace: { + common: true + description: """ + Used as a namespace for metrics that don't have it. + A namespace will be prefixed to a metric's name. + It should follow Prometheus [naming conventions](https://prometheus.io/docs/practices/naming/#metric-names). + """ + name: "default_namespace" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["service"] + syntax: "literal" + } + } + } + buckets: { + common: false + description: "Default buckets to use for aggregating [distribution][docs.data-model.metric#distribution] metrics into histograms." + name: "buckets" + required: false + warnings: [] + type: { + array: { + default: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0] + items: { + type: { + float: { + examples: [0.005, 0.01] + } + } + } + } + } + } + quantiles: { + common: false + description: "Quantiles to use for aggregating [distribution][docs.data-model.metric#distribution] metrics into a summary." + name: "quantiles" + required: false + warnings: [] + type: { + array: { + default: [0.5, 0.75, 0.9, 0.95, 0.99] + items: { + type: { + float: { + examples: [0.5, 0.75, 0.9, 0.95, 0.99] + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." 
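+ // A minimal, hypothetical `vector.toml` sketch combining the `endpoint` and
+ // `auth` options documented above; the values reuse this file's examples and
+ // the sink/input IDs are placeholders:
+ //
+ //   [sinks.my_prometheus_rw]
+ //   type = "prometheus_remote_write"
+ //   inputs = ["my-source-or-transform-id"]
+ //   endpoint = "https://localhost:8087/"
+ //   default_namespace = "service"
+ //   auth.strategy = "basic"
+ //   auth.user = "${HTTP_USERNAME}"
+ //   auth.password = "${HTTP_PASSWORD}"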
+ name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." 
+ name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." 
+ name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + tenant_id: { + common: false + description: "If set, a header named `X-Scope-OrgID` will be added to outgoing requests with the text of this setting. This may be used by Cortex or other remote services to identify the tenant making the request." + name: "tenant_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["my-domain"] + syntax: "template" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + prometheus_remote_write: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "prometheus_remote_write" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + auth: null + default_namespace: null + buckets: null + quantiles: null + tenant_id: null + type: null + inputs: null + batch: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: false + summary: true + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. 
+ """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ }
+ aws_s3: {
+ kind: "sink"
+ configuration: {
+ auth: {
+ common: false
+ category: "Auth"
+ description: "Options for the authentication strategy."
+ name: "auth"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ access_key_id: {
+ category: "Auth"
+ common: false
+ description: "The AWS access key id. Used for AWS authentication when communicating with AWS services."
+ name: "access_key_id"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["AKIAIOSFODNN7EXAMPLE"]
+ syntax: "literal"
+ }
+ }
+ }
+ secret_access_key: {
+ category: "Auth"
+ common: false
+ description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services."
+ name: "secret_access_key"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"]
+ syntax: "literal"
+ }
+ }
+ }
+ assume_role: {
+ category: "Auth"
+ common: false
+ description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup."
+ name: "assume_role"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["arn:aws:iam::123456789098:role/my_role"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ endpoint: {
+ common: false
+ description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot."
+ name: "endpoint"
+ relevant_when: "region = null"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["127.0.0.0:5000/path/to/service"]
+ syntax: "literal"
+ }
+ }
+ }
+ acl: {
+ category: "ACL"
+ common: false
+ description: "Canned ACL to apply to the created objects. For more information, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl)."
+ name: "acl"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ enum: {
+ private: "Owner gets `FULL_CONTROL`. No one else has access rights (default)."
+ "public-read": "Owner gets `FULL_CONTROL`. The AllUsers group gets `READ` access."
+ "public-read-write": "Owner gets `FULL_CONTROL`. The AllUsers group gets `READ` and `WRITE` access. Granting this on a bucket is generally not recommended."
+ "aws-exec-read": "Owner gets `FULL_CONTROL`. Amazon EC2 gets `READ` access to `GET` an Amazon Machine Image (AMI) bundle from Amazon S3."
+ "authenticated-read": "Owner gets `FULL_CONTROL`. The AuthenticatedUsers group gets `READ` access."
+ "bucket-owner-read": "Object owner gets `FULL_CONTROL`. Bucket owner gets `READ` access."
+ "bucket-owner-full-control": "Both the object owner and the bucket owner get `FULL_CONTROL` over the object."
+ "log-delivery-write": "The LogDelivery group gets `WRITE` and `READ_ACP` permissions on the bucket.
For more information about logs, see [Amazon S3 Server Access Logging](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html)."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ bucket: {
+ description: "The S3 bucket name. Do not include a leading `s3://` or a trailing `/`."
+ name: "bucket"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["my-bucket"]
+ syntax: "literal"
+ }
+ }
+ }
+ content_encoding: {
+ category: "Content Type"
+ common: false
+ description: "Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. By default calculated from `compression` value."
+ name: "content_encoding"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["gzip"]
+ syntax: "literal"
+ }
+ }
+ }
+ content_type: {
+ category: "Content Type"
+ common: false
+ description: "A standard MIME type describing the format of the contents."
+ name: "content_type"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "text/x-log"
+ syntax: "literal"
+ }
+ }
+ }
+ filename_append_uuid: {
+ category: "File Naming"
+ common: false
+ description: "Whether or not to append a UUID v4 token to the end of the file. This ensures there are no name collisions in high volume use cases."
+ name: "filename_append_uuid"
+ required: false
+ warnings: []
+ type: {
+ bool: {
+ default: true
+ }
+ }
+ }
+ filename_extension: {
+ category: "File Naming"
+ common: false
+ description: "The filename extension to use in the object name."
+ name: "filename_extension"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "log"
+ syntax: "literal"
+ }
+ }
+ }
+ filename_time_format: {
+ category: "File Naming"
+ common: false
+ description: "The format of the resulting object file name. [`strftime` specifiers](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) are supported."
+ name: "filename_time_format"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "%s"
+ syntax: "strftime"
+ }
+ }
+ }
+ grant_full_control: {
+ category: "ACL"
+ common: false
+ description: "Gives the named [grantee](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee) READ, READ_ACP, and WRITE_ACP permissions on the created objects."
+ name: "grant_full_control"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", "person@email.com", "http://acs.amazonaws.com/groups/global/AllUsers"]
+ syntax: "literal"
+ }
+ }
+ }
+ grant_read: {
+ category: "ACL"
+ common: false
+ description: "Allows the named [grantee](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee) to read the created objects and their metadata."
+ name: "grant_read"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", "person@email.com", "http://acs.amazonaws.com/groups/global/AllUsers"]
+ syntax: "literal"
+ }
+ }
+ }
+ grant_read_acp: {
+ category: "ACL"
+ common: false
+ description: "Allows the named [grantee](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee) to read the created objects' ACL."
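+ // A minimal, hypothetical sketch of the file-naming options documented above;
+ // with these (default) values an object key looks like the
+ // `date=2019-06-18/1560886634-<uuid>.log` example given later in this section,
+ // and the sink ID is a placeholder:
+ //
+ //   [sinks.my_s3_sink]
+ //   type = "aws_s3"
+ //   bucket = "my-bucket"
+ //   key_prefix = "date=%F/"
+ //   filename_time_format = "%s"
+ //   filename_append_uuid = true
+ //   filename_extension = "log"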
+ name: "grant_read_acp" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", "person@email.com", "http://acs.amazonaws.com/groups/global/AllUsers"] + syntax: "literal" + } + } + } + grant_write_acp: { + category: "ACL" + common: false + description: "Allows the named [grantee](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee) to write the created objects' ACL." + name: "grant_write_acp" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be", "person@email.com", "http://acs.amazonaws.com/groups/global/AllUsers"] + syntax: "literal" + } + } + } + key_prefix: { + category: "File Naming" + common: true + description: "A prefix to apply to all object key names. This should be used to partition your objects, and it's important to end this value with a `/` if you want this to be the root S3 \"folder\"." + name: "key_prefix" + required: false + warnings: [] + type: { + string: { + default: "date=%F/" + examples: ["date=%F/", "date=%F/hour=%H/", "year=%Y/month=%m/day=%d/", "application_id={{ application_id }}/date=%F/"] + syntax: "template" + } + } + } + server_side_encryption: { + category: "Encryption" + common: false + description: "The Server-side Encryption algorithm used when storing these objects." + name: "server_side_encryption" + required: false + warnings: [] + type: { + string: { + default: null + enum: { + AES256: "256-bit Advanced Encryption Standard" + "aws:kms": "AWS managed key encryption" + } + syntax: "literal" + } + } + } + ssekms_key_id: { + category: "Encryption" + common: false + description: "If `server_side_encryption` has the value `\"aws.kms\"`, this specifies the ID of the AWS Key Management Service (AWS KMS) symmetrical customer managed customer master key (CMK) that will used for the created objects. If not specified, Amazon S3 uses the AWS managed CMK in AWS to protect the data." + name: "ssekms_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["abcd1234"] + syntax: "literal" + } + } + } + storage_class: { + category: "Storage" + common: false + description: "The storage class for the created objects. See [the S3 Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) for more details." + name: "storage_class" + required: false + warnings: [] + type: { + string: { + default: null + enum: { + STANDARD: "The default storage class. If you don't specify the storage class when you upload an object, Amazon S3 assigns the STANDARD storage class." + REDUCED_REDUNDANCY: "Designed for noncritical, reproducible data that can be stored with less redundancy than the STANDARD storage class. AWS recommends that you not use this storage class. The STANDARD storage class is more cost effective. " + INTELLIGENT_TIERING: "Stores objects in two access tiers: one tier that is optimized for frequent access and another lower-cost tier that is optimized for infrequently accessed data." + STANDARD_IA: "Amazon S3 stores the object data redundantly across multiple geographically separated Availability Zones (similar to the STANDARD storage class)." + ONEZONE_IA: "Amazon S3 stores the object data in only one Availability Zone." + GLACIER: "Use for archives where portions of the data might need to be retrieved in minutes." 
+ DEEP_ARCHIVE: "Use for archiving data that rarely needs to be accessed."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
+ """
+ name: "inputs"
+ required: true
+ warnings: []
+ sort: -1
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["my-source-or-transform-id", "prefix-*"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ buffer: {
+ common: false
+ category: "Buffer"
+ description: "Configures the sink specific buffer behavior."
+ name: "buffer"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ max_events: {
+ common: true
+ description: "The maximum number of [events][docs.data-model] allowed in the buffer."
+ name: "max_events"
+ required: false
+ relevant_when: "type = \"memory\""
+ warnings: []
+ type: {
+ uint: {
+ default: 500
+ unit: "events"
+ }
+ }
+ }
+ max_size: {
+ description: "The maximum size of the buffer on the disk."
+ name: "max_size"
+ required: true
+ relevant_when: "type = \"disk\""
+ warnings: []
+ type: {
+ uint: {
+ examples: [104900000]
+ unit: "bytes"
+ }
+ }
+ }
+ type: {
+ common: true
+ description: "The buffer's type and storage mechanism."
+ name: "type"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "memory"
+ enum: {
+ memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully."
+ disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ when_full: {
+ common: false
+ description: "The behavior when the buffer becomes full."
+ name: "when_full"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "block"
+ enum: {
+ block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge."
+ drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ batch: {
+ common: true
+ category: "Batch"
+ description: "Configures the sink batching behavior."
+ name: "batch"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ max_bytes: {
+ common: true
+ description: "The maximum size of a batch, in bytes, before it is flushed."
+ name: "max_bytes"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 10000000
+ unit: "bytes"
+ }
+ }
+ }
+ timeout_secs: {
+ common: true
+ description: "The maximum age of a batch before it is flushed."
+ name: "timeout_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 300
+ unit: "seconds"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ compression: {
+ common: true
+ description: "The compression strategy used to compress the encoded event data before transmission."
+ name: "compression"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: "gzip"
+ enum: {
+ none: "No compression."
+ gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression."
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ encoding: {
+ category: "Encoding"
+ description: "Configures the encoding specific sink behavior."
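+ // A hedged `vector.toml` sketch of the buffer and batch options documented
+ // above; the disk buffer size reuses this file's example value, the batch
+ // values restate documented defaults, and the sink ID is a placeholder:
+ //
+ //   [sinks.my_s3_sink]
+ //   buffer.type = "disk"
+ //   buffer.max_size = 104900000   # bytes
+ //   buffer.when_full = "block"
+ //   batch.max_bytes = 10000000    # default
+ //   batch.timeout_secs = 300      # default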
+ name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["ndjson", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. 
Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 50 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 250 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + tags: { + common: false + category: "Tags" + description: "The tag-set for the object." + name: "tags" + required: false + warnings: [] + type: { + object: { + examples: [{ + Tag1: "Value1" + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_s3: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. 
Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "aws_s3" + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file. (usually located at `~/.aws/config`) + 4. 
The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). (usually located at `~/.aws/credentials`)
+ 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html). (will only work if running on an EC2 instance with an instance profile/role)
+
+ If credentials are not found, the [healthcheck](#healthchecks) will fail and an
+ error will be [logged][docs.monitoring#logs].
+ """
+ sub_sections: [{
+ title: "Obtaining an access key"
+ body: """
+ In general, we recommend using instance profiles/roles whenever possible. In
+ cases where this is not possible you can generate an AWS access key for any user
+ within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on
+ how to do this. Access keys created this way can be used via the [`access_key_id`](#access_key_id)
+ and [`secret_access_key`](#secret_access_key) options.
+ """
+ }, {
+ title: "Assuming roles"
+ body: """
+ Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an
+ optional setting that is helpful for a variety of use cases, such as cross
+ account access.
+ """
+ }]
+ }
+ cross_account: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "cross_account"
+ title: "Cross account object writing"
+ body: """
+ If you're using Vector to write objects across AWS accounts then you should
+ consider setting the `grant_full_control` option to the bucket owner's
+ canonical user ID. AWS provides a
+ [full tutorial](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example3.html) for this use case. If you
+ don't know the bucket owner's canonical ID you can find it by following
+ [this tutorial](https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId).
+ """
+ }
+ object_acl: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "object_acl"
+ title: "Object Access Control List (ACL)"
+ body: """
+ AWS S3 supports [access control lists (ACL)](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) for buckets and
+ objects. In the context of Vector, only object ACLs are relevant (Vector does
+ not create or modify buckets). You can set the object level ACL by using one
+ of the `acl`, `grant_full_control`, `grant_read`, `grant_read_acp`, or
+ `grant_write_acp` options.
+ """
+ sub_sections: [{
+ title: "`acl.*` vs `grant_*` options"
+ body: """
+ The `grant_*` options name a specific entity to grant access to. The `acl`
+ option is one of a set of [specific canned ACLs](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) that
+ can only name the owner or world.
+ """
+ }]
+ }
+ object_naming: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "object_naming"
+ title: "Object naming"
+ body: """
+ By default, Vector will name your S3 objects in the following format:
+
+
+
+
+
+ ```text
+ -.log
+ ```
+
+ For example:
+
+ ```text
+ date=2019-06-18/1560886634-fddd7a0e-fad9-4f7e-9bce-00ae5debc563.log
+ ```
+
+
+
+
+ ```text
+ -.log.gz
+ ```
+
+ For example:
+
+ ```text
+ date=2019-06-18/1560886634-fddd7a0e-fad9-4f7e-9bce-00ae5debc563.log.gz
+ ```
+
+
+
+
+ Vector appends a [UUIDV4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) token to ensure there are no name
+ conflicts in the unlikely event 2 Vector instances are writing data at the same
+ time.
+
+ You can control the resulting name via the `key_prefix`, `filename_time_format`,
+ and `filename_append_uuid` options.
+ """
+ }
+ object_tags_and_metadata: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "object_tags_and_metadata"
+ title: "Object Tags & metadata"
+ body: """
+ Vector currently only supports [AWS S3 object tags](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/add-object-tags.html) and does
+ _not_ support [object metadata](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-metadata). If you require metadata
+ support, see [issue #1694](https://github.com/timberio/vector/issues/1694).
+
+ We believe tags are more flexible since they are separate from the actual S3
+ object. You can freely modify tags without modifying the object. Conversely,
+ object metadata requires a full rewrite of the object to make changes.
+ """
+ }
+ server_side_encryption: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "server_side_encryption"
+ title: "Server-Side Encryption (SSE)"
+ body: """
+ AWS S3 offers [server-side encryption](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). You can apply defaults
+ at the bucket level or set the encryption at the object level. In the context
+ of Vector, only the object level is relevant (Vector does not create or modify
+ buckets). We recommend setting defaults at the bucket level when
+ possible. You can explicitly set the object level encryption via the
+ `server_side_encryption` option.
+ """
+ }
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ healthchecks: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "healthchecks"
+ title: "Health checks"
+ body: """
+ Health checks ensure that the downstream service is
+ accessible and ready to accept data. This check is performed
+ upon sink initialization. If the health check fails an error
+ will be logged and Vector will proceed to start.
+ """
+ sub_sections: [{
+ title: "Require health checks"
+ body: """
+ If you'd like to exit immediately upon a health
+ check failure, you can pass the
+ `--require-healthy` flag:
+
+ ```bash
+ vector --config /etc/vector/vector.toml --require-healthy
+ ```
+ """
+ }, {
+ title: "Disable health checks"
+ body: """
+ If you'd like to disable health checks for this
+ sink you can set the `healthcheck` option to
+ `false`.
+ """
+ }]
+ }
+ partitioning: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "partitioning"
+ title: "Partitioning"
+ body: """
+ Vector supports dynamic configuration values through a simple
+ template syntax. If an option supports templating, it will be
+ noted with a badge and you can use event fields to create dynamic
+ values. For example:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ \tdynamic_option = "application={{ application_id }}"
+ ```
+
+ In the above example, the `application_id` for each event will be
+ used to partition outgoing data.
+ """
+ }
+ rate_limits: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "rate_limits"
+ title: "Rate limits & adaptive concurrency"
+ body: null
+ sub_sections: [{
+ title: "Adaptive Request Concurrency (ARC)"
+ body: """
+ Adaptive Request Concurrency is a feature of Vector that does away
+ with static rate limits and automatically optimizes HTTP
+ concurrency limits based on downstream service responses. The
+ underlying mechanism is a feedback loop inspired by TCP congestion
+ control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/).
+
+ We highly recommend enabling this feature as it improves
+ performance and reliability of Vector and the systems it
+ communicates with.
+
+ To enable, set the `request.concurrency` option to `adaptive`:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ request.concurrency = "adaptive"
+ ```
+ """
+ }, {
+ title: "Static rate limits"
+ body: """
+ If Adaptive Request Concurrency is not for you, you can manually
+ set static rate limits with the `request.rate_limit_duration_secs`,
+ `request.rate_limit_num`, and `request.concurrency` options:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ request.rate_limit_duration_secs = 1
+ request.rate_limit_num = 10
+ request.concurrency = 10
+ ```
+ """
+ }]
+ }
+ buffers_batches: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "buffers_batches"
+ title: "Buffers & batches"
+ body: """
+
+
+ This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts
+ differently: instead of treating them as global concepts, Vector treats them
+ as sink-specific concepts. This isolates sinks, ensuring service disruptions
+ are contained and delivery guarantees are honored.
+
+ *Batches* are flushed when 1 of 2 conditions are met:
+
+ 1. The batch age meets or exceeds the configured `timeout_secs`.
+ 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>.
+
+ *Buffers* are controlled via the [`buffer.*`](#buffer) options.
+ """
+ }
+ storage_class: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "storage_class"
+ title: "Storage class"
+ body: """
+ AWS S3 offers [storage classes](https://aws.amazon.com/s3/storage-classes/). You can apply
+ defaults, and rules, at the bucket level or set the storage class at the object
+ level. In the context of Vector only the object level is relevant (Vector does
+ not create or modify buckets). You can set the storage class via the
+ `storage_class` option.
+ """
+ }
+ retry_policy: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "retry_policy"
+ title: "Retry policy"
+ body: """
+ Vector will retry failed requests (status == 429, >= 500, and != 501).
+ Other responses will not be retried. You can control the number of
+ retry attempts and backoff rate with the `request.retry_attempts` and
+ `request.retry_backoff_secs` options.
+ """ + } + } + title: "AWS S3" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + acl: null + bucket: null + content_encoding: null + content_type: null + filename_append_uuid: null + filename_extension: null + filename_time_format: null + grant_full_control: null + grant_read: null + grant_read_acp: null + grant_write_acp: null + key_prefix: null + server_side_encryption: null + ssekms_key_id: null + storage_class: null + tags: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: true + max_bytes: 10000000 + max_events: null + timeout_secs: 300 + } + compression: { + enabled: true + default: "gzip" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["ndjson", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 50 + rate_limit_duration_secs: 1 + rate_limit_num: 250 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS S3" + thing: "an AWS S3 bucket" + url: "https://aws.amazon.com/s3/" + versions: null + description: "[Amazon Simple Storage Service (Amazon S3)](https://aws.amazon.com/s3/) is a scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data and applications on Amazon Web Services. It is very commonly used to store log data." + connect_to: { + vector: { + logs: { + setup: [{ + title: "Create an AWS SQS queue" + description: "Create an AWS SQS queue for Vector to consume bucket notifications from." + detour: { + url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-create-queue.html" + } + }, { + title: "Publish S3 bucket notifications to the queue" + description: """ + Configure S3 to publish Bucket notifications to your previously created SQS queue. + Ensure that it only publishes the following events: + + - PUT + - POST + - COPY + - Multipart upload completed + + These represent object creation events and ensure Vector does not double process + S3 objects. + """ + detour: { + url: "https://docs.aws.amazon.com/AmazonS3/latest/dev/ways-to-add-notification-config-to-bucket.html" + } + }, { + title: "Configure Vector" + description: """ + Using the SQS queue URL provided to you by AWS, configure the Vector `aws_s3` + source to use the SQS queue via the `sqs.queue_url` option. + """ + vector: { + configure: { + sources: { + aws_s3: { + type: "aws_s3" + sqs: { + queue_url: "" + } + } + } + } + } + }] + } + } + } + } + interface: { + socket: { + api: { + title: "AWS S3 API" + url: "https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_endpoint" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." 
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/AmazonS3/latest/APIReference/API_HeadBucket.html" + action: "s3:HeadBucket" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/AmazonS3/latest/APIReference/API_ListBucket.html" + action: "s3:ListBucket" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/AmazonS3/latest/APIReference/API_PutObject.html" + action: "s3:PutObject" + }] + }] + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + datadog_metrics: { + kind: "sink" + title: "Datadog Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["Datadog"] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: null + max_events: 20 + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: false + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "Datadog metrics" + thing: "a Datadog metrics database" + url: "https://docs.datadoghq.com/metrics/" + versions: null + description: "[Datadog](https://www.datadoghq.com) is a monitoring service for cloud-scale applications, providing monitoring of servers, databases, tools, and services, through a SaaS-based data analytics platform." + } + interface: { + socket: { + api: { + title: "Datadog metrics API" + url: "https://docs.datadoghq.com/api/v1/metrics/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + api_key: { + description: "Datadog [API key](https://docs.datadoghq.com/api/?lang=bash#authentication)" + name: "api_key" + required: true + warnings: [] + type: { + string: { + examples: ["${DATADOG_API_KEY_ENV_VAR}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + endpoint: { + common: false + description: "The endpoint to send data to." + name: "endpoint" + relevant_when: "region is not set" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.1:8080", "example.com:12345"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." 
+ name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 20 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." 
+ name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + default_namespace: { + common: true + description: """ + Used as a namespace for metrics that don't have it. + A namespace will be prefixed to a metric's name. + """ + name: "default_namespace" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["service"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + datadog_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "datadog_metrics" + #ExampleConfig: { + title: string + configuration: { + api_key: null + endpoint: null + default_namespace: null + type: null + inputs: null + batch: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: false + set: false + summary: false + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + nats: { + kind: "sink" + title: "NATS" + classes: { + commonly_used: false + delivery: "best_effort" + development: "beta" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: false + } + tls: { + enabled: false + } + to: { + service: { + name: "NATS" + thing: "a NATS server" + url: "https://nats.io/" + versions: null + description: "[NATS.io](https://nats.io/) is a simple, secure and high performance open source messaging system for cloud native applications, IoT messaging, and microservices architectures. NATS.io is a Cloud Native Computing Foundation project." + } + interface: { + socket: { + direction: "outgoing" + protocols: ["tcp"] + ssl: "disabled" + } + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + url: { + description: "The NATS URL to connect to. The url _must_ take the form of `nats://server:port`." + name: "url" + required: true + warnings: [] + type: { + string: { + examples: ["nats://demo.nats.io", "nats://127.0.0.1:4222"] + syntax: "literal" + } + } + } + subject: { + description: "The NATS subject to publish messages to." + name: "subject" + required: true + warnings: [] + type: { + string: { + examples: ["{{ host }}", "foo", "time.us.east", "time.*.east", "time.>", ">"] + syntax: "template" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. 
Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + name: { + common: false + description: "A name assigned to the NATS connection." + name: "name" + required: false + warnings: [] + type: { + string: { + default: "vector" + examples: ["foo", "API Name Option Example"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + nats: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "nats" + #ExampleConfig: { + title: string + configuration: { + url: null + subject: null + name: null + type: null + inputs: null + encoding: null + healthcheck: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + missing_keys_total: { + description: "The total number of failed template renders due to missed keys from the event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + } + name: "missing_keys_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + send_errors_total: { + description: "The total number of errors sending messages." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "send_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + } + } + aws_cloudwatch_logs: { + kind: "sink" + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. 
Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + create_missing_group: { + common: true + description: "Dynamically create a [log group](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) if it does not already exist. This will ignore `create_missing_stream` directly after creating the group and will create the first stream." + name: "create_missing_group" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + create_missing_stream: { + common: true + description: "Dynamically create a [log stream](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) if it does not already exist." + name: "create_missing_stream" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + group_name: { + description: "The [group name](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) of the target CloudWatch Logs stream." + name: "group_name" + required: true + warnings: [] + type: { + string: { + examples: ["group-name", "{{ file }}"] + syntax: "template" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." 
+ name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1048576 + unit: "bytes" + } + } + } + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 10000 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." 
+ name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." 
+ name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + stream_name: { + description: "The [stream name](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) of the target CloudWatch Logs stream." + name: "stream_name" + required: true + warnings: [] + type: { + string: { + examples: ["{{ host }}", "%Y-%m-%d", "stream-name"] + syntax: "template" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_cloudwatch_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." 
+ name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "aws_cloudwatch_logs" + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file. (usually located at `~/.aws/config`) + 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). (usually located at `~/.aws/credentials`) + 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html). (will only work if running on an EC2 instance with an instance profile/role) + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + sub_sections: [{ + title: "Obtaining an access key" + body: """ + In general, we recommend using instance profiles/roles whenever possible. 
In + cases where this is not possible, you can generate an AWS access key for any user + within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on + how to do this. Access keys created this way can be used via the [`access_key_id`](#access_key_id) + and [`secret_access_key`](#secret_access_key) options. + """ + }, { + title: "Assuming roles" + body: """ + Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an + optional setting that is helpful for a variety of use cases, such as + cross-account access. + """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink, you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adaptive concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Request Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/). + + We highly recommend enabling this feature as it improves the + performance and reliability of Vector and the systems it + communicates with.
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. Vector treats these concepts + differently: instead of treating them as global concepts, it treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured `max_size` or `max_events`. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status 429, or any status >= 500 except 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + title: "AWS Cloudwatch Logs" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + create_missing_group: null + create_missing_stream: null + group_name: null + stream_name: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1048576 + max_events: 10000 + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: false + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS Cloudwatch logs" + thing: "an AWS Cloudwatch logs stream" + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html" + versions: null + description: "[Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and management service that provides data and actionable insights for AWS, hybrid, and on-premises applications, and infrastructure resources.
With CloudWatch, you can collect and access all your performance and operational data in the form of logs and metrics from a single platform." + connect_to: { + aws_kinesis_firehose: { + logs: { + setup: [{ + title: "Stream CloudWatch logs to Firehose" + description: """ + Using your configured AWS Firehose delivery stream, we'll need to + stream AWS Cloudwatch Logs to it. We achieve this through AWS Cloudwatch Logs + subscriptions. + """ + detour: { + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html#FirehoseExample" + } + }] + } + } + aws_s3: { + logs: { + description: """ + AWS Cloudwatch logs can export log data to S3, which can then be + imported by Vector via the `aws_s3` source. Please note, this is + a single export, not a stream of data. If you want Vector to + continuously ingest AWS Cloudwatch logs data, you will need to + follow the AWS Cloudwatch logs to AWS Kinesis tutorial. + """ + setup: [{ + title: "Export AWS Cloudwatch logs data to AWS S3" + description: """ + Follow the AWS CloudWatch to S3 export guide to export + your Cloudwatch logs data to the S3 bucket of your choice. + """ + detour: { + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/S3Export.html" + } + }] + } + } + } + } + interface: { + socket: { + api: { + title: "AWS Cloudwatch logs API" + url: "https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput."
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/logs/latest/APIReference/API_CreateLogGroup.html" + required_when: "[`create_missing_group`](#create_missing_group) is set to `true`" + action: "logs:CreateLogGroup" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/logs/latest/APIReference/API_CreateLogStream.html" + required_when: "[`create_missing_stream`](#create_missing_stream) is set to `true`" + action: "logs:CreateLogStream" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/logs/latest/APIReference/API_DescribeLogGroups.html" + action: "logs:DescribeLogGroups" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/logs/latest/APIReference/API_DescribeLogStreams.html" + action: "logs:DescribeLogStreams" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/logs/latest/APIReference/API_PutLogEvents.html" + action: "logs:PutLogEvents" + }] + }] + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + elasticsearch: { + kind: "sink" + title: "Elasticsearch" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS", "Azure", "Elastic", "GCP"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 10490000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: true + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Elasticsearch" + thing: "an Elasticsearch database" + url: "https://www.elastic.co/products/elasticsearch" + versions: null + description: "[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a search engine based on the Lucene library. 
It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents. As a result, it is very commonly used to store and analyze log data. It ships with Kibana which is a simple interface for visualizing and exploring data in Elasticsearch." + } + interface: { + socket: { + api: { + title: "Elasticsearch bulk API" + url: "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [#""" + Elasticsearch's Data streams feature requires Vector to be configured with the `create` `bulk_action`. *This is not enabled by default.* + """#] + warnings: [] + notices: [] + } + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${ELASTICSEARCH_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." + name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + aws: "Authentication strategy used for [AWS' hosted Elasticsearch service](https://aws.amazon.com/elasticsearch-service/)." + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + } + examples: ["aws", "basic"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." 
+ name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${ELASTICSEARCH_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + aws: { + common: false + category: "Aws" + description: "Options for the AWS connections." + name: "aws" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + region: { + common: true + description: "The [AWS region][urls.aws_regions] of the target service. This defaults to the region named in the endpoint parameter, or the value of the `$AWS_REGION` or `$AWS_DEFAULT_REGION` environment variables if that cannot be determined, or \"us-east-1\"." + name: "region" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["us-east-1"] + syntax: "literal" + } + } + } + } + } + } + } + bulk_action: { + common: false + description: "Action to use when making requests to the [Elasticsearch Bulk API](elasticsearch_bulk). Supports `index` and `create`." + name: "bulk_action" + required: false + warnings: [] + type: { + string: { + default: "index" + examples: ["index", "create"] + syntax: "literal" + } + } + } + doc_type: { + common: false + description: "The `doc_type` for your index data. This is only relevant for Elasticsearch <= 6.X. If you are using >= 7.0 you do not need to set this option since Elasticsearch has removed it." + name: "doc_type" + required: false + warnings: [] + type: { + string: { + default: "_doc" + syntax: "literal" + } + } + } + endpoint: { + description: "The Elasticsearch endpoint to send logs to. This should be the full URL as shown in the example." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["http://10.24.32.122:9000", "https://example.com", "https://user:password@example.com"] + syntax: "literal" + } + } + } + id_key: { + common: false + description: "The name of the event key that should map to Elasticsearch's [`_id` field][urls.elasticsearch_id_field]. By default, Vector does not set the `_id` field, which allows Elasticsearch to set this automatically. You should think carefully about setting your own Elasticsearch IDs, since this can [hinder perofrmance][urls.elasticsearch_id_performance]." + name: "id_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["id", "_id"] + syntax: "literal" + } + } + } + index: { + common: true + description: "Index name to write events to." + name: "index" + required: false + warnings: [] + type: { + string: { + default: "vector-%F" + examples: ["application-{{ application_id }}-%Y-%m-%d", "vector-%Y-%m-%d"] + syntax: "template" + } + } + } + pipeline: { + common: true + description: "Name of the pipeline to apply." + name: "pipeline" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["pipeline-name"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." 
+ name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 10490000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." 
+ name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + headers: { + common: false + category: "Headers" + description: "Options for custom headers." + name: "headers" + required: false + warnings: [] + type: { + object: { + examples: [{ + Authorization: "${HTTP_TOKEN}" + "X-Powered-By": "Vector" + }] + options: {} + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." 
+ name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + query: { + common: false + category: "Query" + description: "Custom parameters to Elasticsearch query string." + name: "query" + required: false + warnings: [] + type: { + object: { + examples: [{ + "X-Powered-By": "Vector" + }] + options: {} + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + elasticsearch: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "elasticsearch" + #ExampleConfig: { + title: string + configuration: { + auth: null + aws: null + bulk_action: null + doc_type: null + endpoint: null + id_key: null + index: null + pipeline: null + query: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + conflicts: { + #Subsection: { + title: string + body: string + } + name: "conflicts" + title: "Conflicts" + body: """ + Vector [batches](#buffers--batches) data flushes it to Elasticsearch's + [`_bulk` API endpoint][urls.elasticsearch_bulk]. By default, all events are + inserted via the `index` action which will update documents if an existing + one has the same `id`. If `bulk_action` is configured with `create`, Elasticsearch + will _not_ replace an existing document and instead return a conflict error. + """ + } + data_streams: { + #Subsection: { + title: string + body: string + } + name: "data_streams" + title: "Data streams" + body: """ + By default, Vector will use the `index` action with Elasticsearch's Bulk API. + To use [Data streams][urls.elasticsearch_data_streams], `bulk_action` must be configured + with the `create` option. + """ + } + partial_failures: { + #Subsection: { + title: string + body: string + } + name: "partial_failures" + title: "Partial Failures" + body: """ + By default, Elasticsearch will allow partial bulk ingestion + failures. This is typically due to type Elasticsearch index + mapping errors, where data keys are not consistently typed. + To change this behavior please refer to the Elasticsearch + [`ignore_malformed` setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/ignore-malformed.html). + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." 
+ }
+ healthchecks: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "healthchecks"
+ title: "Health checks"
+ body: """
+ Health checks ensure that the downstream service is
+ accessible and ready to accept data. This check is performed
+ upon sink initialization. If the health check fails, an error
+ will be logged and Vector will proceed to start.
+ """
+ sub_sections: [{
+ title: "Require health checks"
+ body: """
+ If you'd like to exit immediately upon a health
+ check failure, you can pass the
+ `--require-healthy` flag:
+
+ ```bash
+ vector --config /etc/vector/vector.toml --require-healthy
+ ```
+ """
+ }, {
+ title: "Disable health checks"
+ body: """
+ If you'd like to disable health checks for this
+ sink, you can set the `healthcheck` option to
+ `false`.
+ """
+ }]
+ }
+ partitioning: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "partitioning"
+ title: "Partitioning"
+ body: """
+ Vector supports dynamic configuration values through a simple
+ template syntax. If an option supports templating, it will be
+ noted with a badge and you can use event fields to create dynamic
+ values. For example:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ \tdynamic_option = "application={{ application_id }}"
+ ```
+
+ In the above example, the `application_id` for each event will be
+ used to partition outgoing data.
+ """
+ }
+ rate_limits: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "rate_limits"
+ title: "Rate limits & adaptive concurrency"
+ body: null
+ sub_sections: [{
+ title: "Adaptive Request Concurrency (ARC)"
+ body: """
+ Adaptive Request Concurrency is a feature of Vector that does away
+ with static rate limits and automatically optimizes HTTP
+ concurrency limits based on downstream service responses. The
+ underlying mechanism is a feedback loop inspired by TCP congestion
+ control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/).
+
+ We highly recommend enabling this feature as it improves
+ performance and reliability of Vector and the systems it
+ communicates with.
+
+ To enable, set the `request.concurrency` option to `adaptive`:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ request.concurrency = "adaptive"
+ ```
+ """
+ }, {
+ title: "Static rate limits"
+ body: """
+ If Adaptive Request Concurrency is not for you, you can manually
+ set static rate limits with the `request.rate_limit_duration_secs`,
+ `request.rate_limit_num`, and `request.concurrency` options:
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ request.rate_limit_duration_secs = 1
+ request.rate_limit_num = 10
+ request.concurrency = 10
+ ```
+ """
+ }]
+ }
+ transport_layer_security: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "transport_layer_security"
+ title: "Transport Layer Security (TLS)"
+ body: """
+ Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols due to its
+ maturity. You can enable and adjust TLS behavior via the `tls.*`
+ options.
+ """
+ }
+ buffers_batches: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "buffers_batches"
+ title: "Buffers & batches"
+ body: """
+
+
+ This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these
+ as sink-specific concepts rather than global ones. This isolates sinks, ensuring service disruptions
+ are contained and delivery guarantees are honored.
+
+ *Batches* are flushed when 1 of 2 conditions are met:
+
+ 1. The batch age meets or exceeds the configured `timeout_secs`.
+ 2. The batch size meets or exceeds the configured `max_bytes` (or `max_events`, where supported).
+
+ *Buffers* are controlled via the [`buffer.*`](#buffer) options.
+ """
+ }
+ aws_authentication: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "aws_authentication"
+ title: "AWS Authentication"
+ body: """
+ Vector checks for AWS credentials in the following order:
+
+ 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key).
+ 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+ 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file (usually located at `~/.aws/config`).
+ 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) (usually located at `~/.aws/credentials`).
+ 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) (this will only work if running on an EC2 instance with an instance profile/role).
+
+ If credentials are not found, the [healthcheck](#healthchecks) will fail and an
+ error will be [logged][docs.monitoring#logs].
+ """
+ sub_sections: [{
+ title: "Obtaining an access key"
+ body: """
+ In general, we recommend using instance profiles/roles whenever possible. In
+ cases where this is not possible, you can generate an AWS access key for any user
+ within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on
+ how to do this. Access keys created this way can be used via the [`access_key_id`](#access_key_id)
+ and [`secret_access_key`](#secret_access_key) options.
+ """
+ }, {
+ title: "Assuming roles"
+ body: """
+ Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an
+ optional setting that is helpful for a variety of use cases, such as
+ cross-account access.
+ """
+ }]
+ }
+ retry_policy: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "retry_policy"
+ title: "Retry policy"
+ body: """
+ Vector will retry failed requests (status == 429, >= 500, and != 501).
+ Other responses will not be retried. You can control the number of
+ retry attempts and backoff rate with the `request.retry_attempts` and
+ `request.retry_initial_backoff_secs` options.
+ """
+ }
+ }
+ telemetry: {
+ metrics: {
+ missing_keys_total: {
+ description: "The total number of failed template renders due to missing keys in the event."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ }
+ name: "missing_keys_total"
+ }
+ events_in_total: {
+ description: "The total number of events accepted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + aws_kinesis_streams: { + kind: "sink" + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + partition_key_field: { + common: true + description: "The log field used as the Kinesis record's partition key value." 
+ name: "partition_key_field" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["user_id"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 5000000 + unit: "bytes" + } + } + } + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." 
+ name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. 
Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability."
+ name: "ewma_alpha"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: 0.7
+ }
+ }
+ }
+ rtt_threshold_ratio: {
+ common: false
+ description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT."
+ name: "rtt_threshold_ratio"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: 0.05
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ concurrency: {
+ common: true
+ description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions."
+ name: "concurrency"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 5
+ unit: "requests"
+ }
+ }
+ }
+ rate_limit_duration_secs: {
+ common: true
+ description: "The time window, in seconds, used for the `rate_limit_num` option."
+ name: "rate_limit_duration_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 1
+ unit: "seconds"
+ }
+ }
+ }
+ rate_limit_num: {
+ common: true
+ description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window."
+ name: "rate_limit_num"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 5
+ unit: null
+ }
+ }
+ }
+ retry_attempts: {
+ common: false
+ description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries."
+ name: "retry_attempts"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 18446744073709552000
+ unit: null
+ }
+ }
+ }
+ retry_initial_backoff_secs: {
+ common: false
+ description: "The amount of time to wait before attempting the first retry for a failed request. Once the first retry has failed, the Fibonacci sequence is used to select future backoffs."
+ name: "retry_initial_backoff_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 1
+ unit: "seconds"
+ }
+ }
+ }
+ retry_max_duration_secs: {
+ common: false
+ description: "The maximum amount of time, in seconds, to wait between retries."
+ name: "retry_max_duration_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 10
+ unit: "seconds"
+ }
+ }
+ }
+ timeout_secs: {
+ common: true
+ description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream."
+ name: "timeout_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 30
+ unit: "seconds"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ region: {
+ description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided, it will override this value, since the endpoint includes the region."
+ name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + stream_name: { + description: "The [stream name](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) of the target Kinesis Logs stream." + name: "stream_name" + required: true + warnings: [] + type: { + string: { + examples: ["my-stream"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_kinesis_streams: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. 
Used for AWS authentication when communicating with AWS services."
+ name: "AWS_SESSION_TOKEN"
+ common: true
+ type: {
+ string: {
+ default: null
+ examples: ["/path/to/credentials.json"]
+ syntax: "literal"
+ }
+ }
+ required: false
+ warnings: []
+ }
+ }
+ type: "aws_kinesis_streams"
+ how_it_works: {
+ aws_authentication: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "aws_authentication"
+ title: "AWS Authentication"
+ body: """
+ Vector checks for AWS credentials in the following order:
+
+ 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key).
+ 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+ 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file (usually located at `~/.aws/config`).
+ 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) (usually located at `~/.aws/credentials`).
+ 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) (this will only work if running on an EC2 instance with an instance profile/role).
+
+ If credentials are not found, the [healthcheck](#healthchecks) will fail and an
+ error will be [logged][docs.monitoring#logs].
+ """
+ sub_sections: [{
+ title: "Obtaining an access key"
+ body: """
+ In general, we recommend using instance profiles/roles whenever possible. In
+ cases where this is not possible, you can generate an AWS access key for any user
+ within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on
+ how to do this. Access keys created this way can be used via the [`access_key_id`](#access_key_id)
+ and [`secret_access_key`](#secret_access_key) options.
+ """
+ }, {
+ title: "Assuming roles"
+ body: """
+ Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an
+ optional setting that is helpful for a variety of use cases, such as
+ cross-account access.
+ """
+ }]
+ }
+ partitioning: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "partitioning"
+ title: "Partitioning"
+ body: """
+ By default, Vector issues random 16-byte values for each
+ [Kinesis record's partition key](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecordsRequestEntry.html#Streams-Type-PutRecordsRequestEntry-PartitionKey), evenly
+ distributing records across your Kinesis partitions. Depending on your use case,
+ this might not be sufficient, since random distribution does not preserve order.
+ To override this, you can supply the `partition_key_field` option. This option
+ designates a field on your event to use as the partition key value instead.
+ This is useful if you already have such a field on your event, and it also pairs
+ nicely with the [`add_fields` transform][docs.transforms.add_fields].
+ """
+ sub_sections: [{
+ title: "Missing partition keys"
+ body: """
+ Kinesis requires a value for the partition key; if the key is
+ missing or the value is blank, the event will be dropped and a
+ [`warning` level log event][docs.monitoring#logs] will be logged. As such,
+ the field specified in the `partition_key_field` option should always contain
+ a value.
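+
+ For example, here is a minimal sketch of a configuration that partitions
+ by a `user_id` field (the sink ID `my-sink`, the input ID `my-source-id`,
+ and the `user_id` field are illustrative placeholders; the options
+ themselves are documented above):
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ type = "aws_kinesis_streams"
+ inputs = ["my-source-id"]
+ region = "us-east-1"
+ stream_name = "my-stream"
+ partition_key_field = "user_id"
+ ```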
+ """ + }, { + title: "Partition keys that exceed 256 characters" + body: """ + If the value provided exceeds the maximum allowed length of 256 characters + Vector will slice the value and use the first 256 characters. + """ + }, { + title: "Non-string partition keys" + body: "Vector will coerce the value into a string." + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. 
+ """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + title: "AWS Kinesis Data Streams" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + partition_key_field: null + stream_name: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 5000000 + max_events: 500 + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS Kinesis Data Streams" + thing: "a AWS Kinesis Data Streams stream" + url: "https://aws.amazon.com/kinesis/data-streams/" + versions: null + description: "[Amazon Kinesis Data Streams](https://aws.amazon.com/kinesis/data-streams/) is a scalable and durable real-time data streaming service that can continuously capture gigabytes of data per second from hundreds of thousands of sources. Making it an excellent candidate for streaming logs and metrics data." + } + interface: { + socket: { + api: { + title: "AWS Kinesis Data Streams API" + url: "https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." 
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStream.html" + action: "kinesis:DescribeStream" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html" + action: "kinesis:PutRecords" + }] + }] + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + splunk_hec: { + kind: "sink" + title: "Splunk HEC" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["Splunk"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 10 + rate_limit_duration_secs: 1 + rate_limit_num: 10 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Splunk" + thing: "a Splunk database" + url: "https://www.splunk.com" + versions: null + } + interface: { + socket: { + api: { + title: "Splunk HEC event endpoint" + url: "https://docs.splunk.com/Documentation/Splunk/8.0.0/RESTREF/RESTinput#services.2Fcollector.2Fevent" + } + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." 
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + description: "The base URL of the Splunk instance." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["https://http-inputs-hec.splunkcloud.com", "https://hec.splunk.com:8088", "http://example.com"] + syntax: "literal" + } + } + } + host_key: { + common: true + description: "The name of the log field to be used as the hostname sent to Splunk HEC. This overrides the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["hostname"] + syntax: "literal" + } + } + } + index: { + common: false + description: """ + The name of the index where send the events to. If not specified, the default index is used. + + """ + name: "index" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["custom_index"] + syntax: "literal" + } + } + } + indexed_fields: { + common: true + description: "Fields to be [added to Splunk index][urls.splunk_hec_indexed_fields]." + name: "indexed_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["field1", "field2"] + syntax: "field_path" + } + } + } + } + } + } + source: { + common: false + description: """ + The source of events sent to this sink. Typically the filename the logs originated from. If unset, the Splunk collector will set it. + + """ + name: "source" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/var/log/syslog", "UDP:514"] + syntax: "literal" + } + } + } + sourcetype: { + common: false + description: """ + The sourcetype of events sent to this sink. If unset, Splunk will default to httpevent. + + """ + name: "sourcetype" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["_json", "httpevent"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." 
+ name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." 
+ name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." 
+ name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." 
+ name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + token: { + description: "Your Splunk HEC token." + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${SPLUNK_HEC_TOKEN}", "A94A8FE5CCB19BA61C4C08"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + splunk_hec: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "splunk_hec" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + host_key: null + index: null + indexed_fields: null + source: null + sourcetype: null + token: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + encode_errors_total: { + description: "The total number of errors encountered when encoding an event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "encode_errors_total" + } + http_request_errors_total: { + description: "The total number of HTTP request errors for this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_request_errors_total" + } + missing_keys_total: { + description: "The total number of failed template renders due to missed keys from the event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "missing_keys_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + requests_received_total: { + description: "The total number of requests received by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "requests_received_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. 
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + } + sematext_logs: { + kind: "sink" + title: "Sematext Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["Sematext"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 10490000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "Sematext" + thing: "a Sematext account" + url: "https://sematext.com" + versions: null + description: "[Sematext](https://sematext.com) is a hosted monitoring platform based on Elasticsearch. Providing powerful monitoring and management solutions to monitor and observe your apps in real-time." + } + interface: { + socket: { + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." 
+ request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + common: false + description: "The endpoint to send data to." + name: "endpoint" + relevant_when: "region is not set" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["http://127.0.0.1", "http://example.com"] + syntax: "literal" + } + } + } + region: { + description: "The region to send data to." + name: "region" + required: true + relevant_when: "endpoint is not set" + warnings: [] + type: { + string: { + enum: { + us: "United States" + eu: "Europe" + } + examples: ["us", "eu"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." 
+ name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 10490000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. 
Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + token: { + description: "The token that will be used to write to Sematext." + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${SEMATEXT_TOKEN}", "some-sematext-token"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + sematext_logs: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "sematext_logs" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + region: null + token: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + setup: { + #Subsection: { + title: string + body: string + } + name: "setup" + title: "Setup" + body: """ + 1. Register for a free account at [Sematext.com](https://apps.sematext.com/ui/registration) + + 2. [Create a Logs App](https://apps.sematext.com/ui/integrations) to get a Logs Token + for [Sematext Logs](http://www.sematext.com/logsene/) + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. 
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + humio_logs: { + kind: "sink" + title: "Humio Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["Humio"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 10 + rate_limit_duration_secs: 1 + rate_limit_num: 10 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Humio" + thing: "a Humio database" + url: "https://humio.com" + versions: null + description: "[Humio][urls.humio] is a time-series logging and aggregation platform for unrestricted, comprehensive event analysis, On-Premises or in the Cloud. With 1TB/day of raw log ingest/node, in-memory stream processing, and live, shareable dashboards and alerts, you can instantly and in real-time explore, monitor, and visualize any system’s data. Metrics are converted to log events via the metric_to_log transform." + } + interface: { + socket: { + api: { + title: "Humio Splunk HEC API" + url: "https://docs.humio.com/integrations/data-shippers/hec/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "disabled" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + common: false + description: "The base URL of the Humio instance." + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "https://cloud.humio.com" + examples: ["http://127.0.0.1", "http://example.com"] + syntax: "literal" + } + } + } + event_type: { + common: false + description: """ + The type of events sent to this sink. Humio uses this as the name of the parser to use to ingest the data. + + If unset, Humio will default it to none. 
+ + """ + name: "event_type" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["json", "none"] + syntax: "template" + } + } + } + host_key: { + common: true + description: "The name of the log field to be used as the hostname sent to Humio. This overrides the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["hostname"] + syntax: "literal" + } + } + } + source: { + common: false + description: """ + The source of events sent to this sink. Typically the filename the logs originated from. Maps to @source in Humio. + + """ + name: "source" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["{{file}}", "/var/log/syslog", "UDP:514"] + syntax: "template" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." 
+ name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." 
+ name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." 
+ name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + token: { + description: "Your Humio ingestion token." + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${HUMIO_TOKEN}", "A94A8FE5CCB19BA61C4C08"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + humio_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "humio_logs" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + event_type: null + host_key: null + source: null + token: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. 
+ """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + papertrail: { + kind: "sink" + title: "Papertrail" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "stream" + service_providers: ["Papertrail"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + send_buffer_bytes: { + enabled: true + } + keepalive: { + enabled: true + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: true + } + to: { + service: { + name: "Papertrail" + thing: "a Papertrail account" + url: "https://www.papertrail.com/" + versions: null + description: "[Papertrail](https://www.papertrail.com/) is a web-based log aggregation application used by developers and IT team to search and view logs in real time." 
+ } + interface: { + socket: { + api: { + title: "Syslog" + url: "https://en.wikipedia.org/wiki/Syslog" + } + direction: "outgoing" + protocols: ["tcp"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + keepalive: "Supports TCP keepalive for efficient resource use and reliability." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + description: "The endpoint to send logs to." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["logs.papertrailapp.com:12345"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." 
+ name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the sink." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." 
+ name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + papertrail: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "papertrail" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + type: null + inputs: null + buffer: null + encoding: null + healthcheck: null + keepalive: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + setup: { + #Subsection: { + title: string + body: string + } + name: "setup" + title: "Setup" + body: """ + 1. Register for a free account at [Papertrailapp.com](https://papertrailapp.com/signup?plan=free) + + 2. [Create a Log Destination](https://papertrailapp.com/destinations/new) to get a Log Destination + and ensure that TCP is enabled. + + 3. Set the log destination as the `endpoint` option and start shipping your logs! + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. 
+ """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + loki: { + kind: "sink" + title: "Loki" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["Grafana"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_events: 100000 + max_bytes: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: "json" + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 1 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Loki" + thing: "a Loki database" + url: "https://grafana.com/oss/loki/" + versions: null + description: "[Loki][urls.loki] is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by [Prometheus][urls.prometheus]. It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream." + } + interface: { + socket: { + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + description: "The base URL of the Loki instance." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["http://localhost:3100"] + syntax: "literal" + } + } + } + auth: { + common: false + category: "Auth" + description: "Configures the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${LOKI_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." + name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + bearer: "The bearer token authentication strategy." 
+ } + examples: ["basic", "bearer"] + syntax: "literal" + } + } + } + token: { + description: "The token to use for bearer authentication" + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${API_TOKEN}", "xyz123"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." + name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${LOKI_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + labels: { + category: "Labels" + description: "A set of labels that will be attached to each batch of events. These values are also templateable to allow events to provide dynamic label values.Note: If the set of label values has high cardinality this can cause drastic performance issues with Loki. To ensure this does not happen one should try to reduce the amount of unique label values." + name: "labels" + required: true + warnings: [] + type: { + object: { + examples: [{ + forwarder: "vector" + event: "{{ event_field }}" + key: "value" + }] + options: { + "*": { + common: false + description: "Any Loki label" + name: "*" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["vector", "{{ event_field }}"] + syntax: "template" + } + } + } + } + } + } + } + out_of_order_action: { + common: false + description: """ + Some sources may generate events with timestamps that are + not strictly in chronological order. The Loki service cannot + accept a stream of such events. Vector will sort events before + sending it to Loki. However, some late events might arrive after + a batch has been sent. This option specifies what Vector should do + with those events. + """ + name: "out_of_order_action" + required: false + warnings: [] + type: { + string: { + syntax: "literal" + default: "drop" + enum: { + drop: "Drop the event, with a warning." + rewrite_timestamp: "Rewrite timestamp of the event to the latest timestamp that was pushed." + } + } + } + } + remove_label_fields: { + common: false + description: "If this is set to `true` then when labels are collected from events those fields will also get removed from the event." + name: "remove_label_fields" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + remove_timestamp: { + common: false + description: "If this is set to `true` then the timestamp will be removed from the event. This is useful because Loki uses the timestamp to index the event." + name: "remove_timestamp" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." 
+ name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 100000 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." 
+ name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." 
+ name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + tenant_id: { + common: false + description: """ + The tenant id that will be sent with every request, by default this is not required since a proxy should set this header. When running Loki locally a tenant id is not required either. 
+ + You can read more about tenant IDs [here][urls.loki_multi_tenancy] + """ + name: "tenant_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["some_tenant_id", "{{ event_field }}"] + syntax: "template" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + loki: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "loki" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + auth: null + labels: null + out_of_order_action: null + remove_label_fields: null + remove_timestamp: null + tenant_id: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + decentralized_deployments: { + #Subsection: { + title: string + body: string + } + name: "decentralized_deployments" + title: "Decentralized Deployments" + body: """ + Loki currently does not support out-of-order inserts. If + Vector is deployed in a decentralized setup, then there is + the possibility that logs might get rejected due to data + races between Vector instances. To avoid this, we suggest + either assigning each Vector instance a unique label + or deploying a centralized Vector, which will ensure no logs + will get sent out-of-order. + """ + } + concurrency: { + #Subsection: { + title: string + body: string + } + name: "concurrency" + title: "Concurrency" + body: """ + To make sure logs arrive at Loki in the correct order, + the `loki` sink only sends one request at a time. + Setting `request.concurrency` will not have any effect. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink, you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data.
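+ + For the `loki` sink specifically, templating is most commonly used in the `labels` option; a sketch mirroring the `labels` examples above (field names illustrative): + + ```toml title="vector.toml" + [sinks.my-sink.labels] + forwarder = "vector" + event = "{{ event_field }}" + ```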
+ """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + event_ordering: { + #Subsection: { + title: string + body: string + } + name: "event_ordering" + title: "Event Ordering" + body: """ + The `loki` sink will ensure that all logs are sorted via + their `timestamp`. This is to ensure that logs will be + accepted by Loki. If no timestamp is supplied with events + then the Loki sink will supply its own monotonically + increasing timestamp. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + humio_metrics: { + kind: "sink" + title: "Humio Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["Humio"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 10 + rate_limit_duration_secs: 1 + rate_limit_num: 10 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Humio" + thing: "a Humio database" + url: "https://humio.com" + versions: null + description: "[Humio][urls.humio] is a time-series logging and aggregation platform for unrestricted, comprehensive event analysis, On-Premises or in the Cloud. With 1TB/day of raw log ingest/node, in-memory stream processing, and live, shareable dashboards and alerts, you can instantly and in real-time explore, monitor, and visualize any system’s data. Metrics are converted to log events via the metric_to_log transform." 
+ } + interface: { + socket: { + api: { + title: "Humio Splunk HEC API" + url: "https://docs.humio.com/integrations/data-shippers/hec/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "disabled" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + endpoint: { + common: false + description: "The base URL of the Humio instance." + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "https://cloud.humio.com" + examples: ["http://127.0.0.1", "http://example.com"] + syntax: "literal" + } + } + } + event_type: { + common: false + description: """ + The type of events sent to this sink. Humio uses this as the name of the parser to use to ingest the data. + + If unset, Humio will default it to none. + + """ + name: "event_type" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["json", "none"] + syntax: "template" + } + } + } + host_key: { + common: true + description: "The name of the log field to be used as the hostname sent to Humio. This overrides the [global `host_key` option][docs.reference.configuration.global-options#host_key]." + name: "host_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["hostname"] + syntax: "literal" + } + } + } + source: { + common: false + description: """ + The source of events sent to this sink. Typically the filename the logs originated from. Maps to @source in Humio. + + """ + name: "source" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["{{file}}", "/var/log/syslog", "UDP:514"] + syntax: "template" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." 
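+ // A minimal humio_metrics sink sketch assembled from the options documented in this section (values are illustrative; the token is a placeholder, and `endpoint` falls back to its documented default of https://cloud.humio.com when omitted): + // + //     [sinks.my_humio_sink] + //     type = "humio_metrics" + //     inputs = ["my-source-or-transform-id"] + //     token = "${HUMIO_TOKEN}" + //     encoding.codec = "json" + //     source = "{{file}}"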
+ name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." 
+ name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." 
+ name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." 
+ name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + token: { + description: "Your Humio ingestion token." + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${HUMIO_TOKEN}", "A94A8FE5CCB19BA61C4C08"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + humio_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "humio_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + event_type: null + host_key: null + source: null + token: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + metrics: { + #Subsection: { + title: string + body: string + } + name: "metrics" + title: "Metrics" + body: "Metrics are converted to log events via the `log_to_event` transform prior to sending to humio." + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. 
Check out the [announcement blog post](/blog/adaptive-request-concurrency/). + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component."
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + http: { + kind: "sink" + title: "HTTP" + classes: { + commonly_used: true + service_providers: [] + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: true + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "ndjson", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 10 + rate_limit_duration_secs: 1 + rate_limit_num: 1000 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: true + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "HTTP" + thing: "an HTTP server" + url: "https://en.wikipedia.org/wiki/Web_server" + versions: null + } + interface: { + socket: { + direction: "outgoing" + protocols: ["http"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + auth: { + common: false + category: "Auth" + description: "Configures the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + password: { + description: "The basic authentication password." + name: "password" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_PASSWORD}", "password"] + syntax: "literal" + } + } + } + strategy: { + description: "The authentication strategy to use." 
+ name: "strategy" + required: true + warnings: [] + type: { + string: { + enum: { + basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)." + bearer: "The bearer token authentication strategy." + } + examples: ["basic", "bearer"] + syntax: "literal" + } + } + } + token: { + description: "The token to use for bearer authentication" + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${API_TOKEN}", "xyz123"] + syntax: "literal" + } + } + } + user: { + description: "The basic authentication user name." + name: "user" + required: true + warnings: [] + type: { + string: { + examples: ["${HTTP_USERNAME}", "username"] + syntax: "literal" + } + } + } + } + } + } + } + uri: { + description: """ + The full URI to make HTTP requests to. This should include the protocol and host, + but can also include the port, path, and any other valid part of a URI. + """ + name: "uri" + required: true + warnings: [] + type: { + string: { + examples: ["https://10.22.212.22:9000/endpoint"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: true + category: "Batch" + description: "Configures the sink batching behavior." 
+ name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "ndjson", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." 
+ name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + headers: { + common: false + category: "Headers" + description: "Options for custom headers." + name: "headers" + required: false + warnings: [] + type: { + object: { + examples: [{ + Authorization: "${HTTP_TOKEN}" + "X-Powered-By": "Vector" + }] + options: {} + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." 
+ name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + category: "Healthcheck" + common: true + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + uri: { + common: false + description: """ + The full URI to make HTTP health check request to. This should include the protocol and host, + but can also include the port, path, and any other valid part of a URI. + """ + name: "uri" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["https://10.22.212.22:9000/health"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + http: "The type of this component." 
+ } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "http" + #ExampleConfig: { + title: string + configuration: { + auth: null + uri: null + healthcheck: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + events_discarded_total: { + description: "The total number of events discarded by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "events_discarded_total" + } + http_bad_requests_total: { + description: "The total number of HTTP `400 Bad Request` errors encountered." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "http_bad_requests_total" + } + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. 
For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adaptive concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Request Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/). + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options.
+ """ + } + } + } + datadog_logs: { + kind: "sink" + title: "Datadog Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["Datadog"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 1049000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "gzip" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: true + } + to: { + service: { + name: "Datadog logs" + thing: "a Datadog logs index" + url: "https://docs.datadoghq.com/logs/" + versions: null + description: "[Datadog](https://www.datadoghq.com) is a monitoring service for cloud-scale applications, providing monitoring of servers, databases, tools, and services, through a SaaS-based data analytics platform." + } + interface: { + socket: { + api: { + title: "Datadog logs API" + url: "https://docs.datadoghq.com/logs/log_collection/?tab=http#datadog-logs-endpoints" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + api_key: { + description: "Datadog [API key](https://docs.datadoghq.com/api/?lang=bash#authentication)" + name: "api_key" + required: true + warnings: [] + type: { + string: { + examples: ["${DATADOG_API_KEY_ENV_VAR}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." 
+ name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "gzip" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." 
+ name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "The endpoint to send data to." + name: "endpoint" + relevant_when: "region is not set" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.1:8080", "example.com:12345"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + datadog_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "datadog_logs" + #ExampleConfig: { + title: string + configuration: { + api_key: null + endpoint: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." 
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + kafka: { + kind: "sink" + title: "Kafka" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "stable" + egress_method: "dynamic" + service_providers: ["AWS", "Confluent"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: true + max_bytes: null + max_events: null + timeout_secs: null + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip", "lz4", "snappy", "zstd"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: false + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: false + can_verify_hostname: false + enabled_default: false + } + to: { + service: { + name: "Kafka" + thing: "Kafka topics" + url: "https://kafka.apache.org/" + versions: ">= 0.8" + description: "[Apache Kafka](https://kafka.apache.org/) is an open-source project for a distributed publish-subscribe messaging system rethought as a distributed commit log. Kafka stores messages in topics that are partitioned and replicated across multiple brokers in a cluster. Producers send messages to topics from which consumers read. These features make it an excellent candidate for durably storing logs and metrics data." 
+ } + interface: { + socket: { + api: { + title: "Kafka protocol" + url: "https://kafka.apache.org/protocol" + } + direction: "outgoing" + protocols: ["tcp"] + ssl: "optional" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + bootstrap_servers: { + description: "A comma-separated list of host and port pairs that are the addresses of the Kafka brokers in a \"bootstrap\" Kafka cluster that a Kafka client connects to initially to bootstrap itself." + name: "bootstrap_servers" + required: true + warnings: [] + type: { + string: { + examples: ["10.14.22.123:9092,10.14.23.332:9092"] + syntax: "literal" + } + } + } + key_field: { + description: "The log field name or tags key to use for the topic key. If unspecified, the key will be randomly generated. If the field does not exist on the log or in tags, a blank value will be used." + name: "key_field" + required: true + warnings: [] + type: { + string: { + examples: ["user_id"] + syntax: "literal" + } + } + } + librdkafka_options: { + common: false + category: "Librdkafka_options" + description: """ + Advanced options. See [librdkafka documentation](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) for details. + + """ + name: "librdkafka_options" + required: false + warnings: [] + type: { + object: { + examples: [{ + "client.id": "${ENV_VAR}" + "fetch.error.backoff.ms": "1000" + "socket.send.buffer.bytes": "100" + }] + options: {} + } + } + } + message_timeout_ms: { + common: false + description: "Local message timeout." + name: "message_timeout_ms" + required: false + warnings: [] + type: { + uint: { + default: 300000 + examples: [150000, 450000] + unit: null + } + } + } + sasl: { + common: false + category: "Sasl" + description: "Options for SASL/SCRAM authentication support." + name: "sasl" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enable SASL/SCRAM authentication to the remote. (Not supported on Windows at this time.)" + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: null + } + } + } + mechanism: { + common: true + description: "The Kafka SASL/SCRAM mechanisms." + name: "mechanism" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["SCRAM-SHA-256", "SCRAM-SHA-512"] + syntax: "literal" + } + } + } + password: { + common: true + description: "The Kafka SASL/SCRAM authentication password." + name: "password" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["password"] + syntax: "literal" + } + } + } + username: { + common: true + description: "The Kafka SASL/SCRAM authentication username." 
+ name: "username" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["username"] + syntax: "literal" + } + } + } + } + } + } + } + socket_timeout_ms: { + common: false + description: """ + Default timeout for network requests. + + """ + name: "socket_timeout_ms" + required: false + warnings: [] + type: { + uint: { + default: 60000 + examples: [30000, 60000] + unit: "milliseconds" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: true + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: {} + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." 
+ name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." 
+ name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + topic: { + description: "The Kafka topic name to write events to." + name: "topic" + required: true + warnings: [] + type: { + string: { + examples: ["topic-1234", "logs-{{unit}}-%Y-%m-%d"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + kafka: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: true + set: true + summary: true + } + } + env_vars: {} + type: "kafka" + #ExampleConfig: { + title: string + configuration: { + bootstrap_servers: null + key_field: null + librdkafka_options: null + message_timeout_ms: null + sasl: null + socket_timeout_ms: null + topic: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + librdkafka: { + #Subsection: { + title: string + body: string + } + name: "librdkafka" + title: "librdkafka" + body: """ + The `kafka` sink uses [`librdkafka`](https://github.com/edenhill/librdkafka) under the hood. This + is a battle-tested, high performance, and reliable library that facilitates + communication with Kafka. As Vector produces static MUSL builds, + this dependency is packaged with Vector, meaning you do not need to install it. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. 
+ """
+ }
+ }
+ telemetry: {
+ metrics: {
+ events_in_total: {
+ description: "The total number of events accepted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_in_total"
+ }
+ events_out_total: {
+ description: "The total number of events emitted by this component."
+ type: "counter"
+ default_namespace: "vector"
+ tags: {
+ instance: {
+ name: "instance"
+ description: "The Vector instance identified by host and port."
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ }
+ aws_kinesis_firehose: {
+ kind: "sink"
+ configuration: {
+ auth: {
+ common: false
+ category: "Auth"
+ description: "Options for the authentication strategy."
+ name: "auth"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ access_key_id: {
+ category: "Auth"
+ common: false
+ description: "The AWS access key id. Used for AWS authentication when communicating with AWS services."
+ name: "access_key_id"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["AKIAIOSFODNN7EXAMPLE"]
+ syntax: "literal"
+ }
+ }
+ }
+ secret_access_key: {
+ category: "Auth"
+ common: false
+ description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services."
+ name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + stream_name: { + description: "The [stream name](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html) of the target Kinesis Firehose delivery stream." + name: "stream_name" + required: true + warnings: [] + type: { + string: { + examples: ["my-stream"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." 
+ } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 4000000 + unit: "bytes" + } + } + } + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." + name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. 
These values have been tuned by optimizing simulated results. In general, you should not need to adjust these."
+ name: "adaptive_concurrency"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ decrease_ratio: {
+ common: false
+ description: "When decreasing the concurrency limit, the new limit is set to this fraction of the current value. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio."
+ name: "decrease_ratio"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: 0.9
+ }
+ }
+ }
+ ewma_alpha: {
+ common: false
+ description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability."
+ name: "ewma_alpha"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: 0.7
+ }
+ }
+ }
+ rtt_threshold_ratio: {
+ common: false
+ description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT."
+ name: "rtt_threshold_ratio"
+ required: false
+ warnings: []
+ type: {
+ float: {
+ default: 0.05
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ concurrency: {
+ common: true
+ description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions."
+ name: "concurrency"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 5
+ unit: "requests"
+ }
+ }
+ }
+ rate_limit_duration_secs: {
+ common: true
+ description: "The time window, in seconds, used for the `rate_limit_num` option."
+ name: "rate_limit_duration_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 1
+ unit: "seconds"
+ }
+ }
+ }
+ rate_limit_num: {
+ common: true
+ description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window."
+ name: "rate_limit_num"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 5
+ unit: null
+ }
+ }
+ }
+ retry_attempts: {
+ common: false
+ description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries."
+ name: "retry_attempts"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 18446744073709552000
+ unit: null
+ }
+ }
+ }
+ retry_initial_backoff_secs: {
+ common: false
+ description: "The amount of time to wait before attempting the first retry for a failed request. Once the first retry has failed, the Fibonacci sequence will be used to select future backoffs."
+ name: "retry_initial_backoff_secs"
+ required: false
+ warnings: []
+ type: {
+ uint: {
+ default: 1
+ unit: "seconds"
+ }
+ }
+ }
+ retry_max_duration_secs: {
+ common: false
+ description: "The maximum amount of time, in seconds, to wait between retries."
+ name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_kinesis_firehose: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." + name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." 
+ name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "aws_kinesis_firehose" + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file. (usually located at `~/.aws/config`) + 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). (usually located at `~/.aws/credentials`) + 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html). (will only work if running on an EC2 instance with an instance profile/role) + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + sub_sections: [{ + title: "Obtaining an access key" + body: """ + In general, we recommend using instance profiles/roles whenever possible. In + cases where this is not possible you can generate an AWS access key for any user + within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on + how to do this. Such created AWS access keys can be used via [`access_key_id`](#access_key_id) + and [`secret_access_key`](#secret_access_key) options. + """ + }, { + title: "Assuming roles" + body: """ + Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an + optional setting that is helpful for a variety of use cases, such as cross + account access. + """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. 
+ """ + } + } + title: "AWS Kinesis Firehose" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + stream_name: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 4000000 + max_events: 500 + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["json", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS Kinesis Firehose" + thing: "a AWS Kinesis Firehose stream" + url: "https://aws.amazon.com/kinesis/data-firehose/" + versions: null + description: """ + [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) is a fully + managed service for delivering real-time streaming data to destinations + such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, + Amazon Elasticsearch Service (Amazon ES), and Splunk. + """ + connect_to: { + vector: { + logs: { + setup: [{ + title: "Configure Vector to accept AWS Kinesis Firehose data" + vector: { + configure: { + sources: { + aws_kinesis_firehose: { + type: "aws_kinesis_firehose" + address: "0.0.0.0:443" + access_key: "A94A8FE5CCB19BA61C4C08" + region: "us-east-1" + } + } + } + } + }, { + title: "Configure TLS termination" + description: """ + AWS Kinesis Firehose will only forward to HTTPS (and not HTTP) + endpoints running on port 443. You will need to either put a load + balancer in front of the Vector instance to handle TLS termination + or configure the `tls` options of the Vector `aws_kinesis_firehose` + source to serve a valid certificate. + """ + detour: { + url: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html" + } + }, { + title: "Create an AWS Kinesis Firehose HTTP Stream" + description: """ + Using your previously configured TLS enabled HTTP endpoint, + let's create a Kinesis Firehose HTTP stream that delivers + data to it. Be sure to use your HTTP endpoint. + """ + detour: { + url: "https://aws.amazon.com/blogs/big-data/stream-data-to-an-http-endpoint-with-amazon-kinesis-data-firehose/" + } + }] + } + } + } + } + interface: { + socket: { + api: { + title: "AWS Kinesis Firehose API" + url: "https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecordBatch.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." 
+ } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: true + metrics: null + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://docs.aws.amazon.com/firehose/latest/APIReference/API_DescribeDeliveryStream.html" + action: "firehose:DescribeDeliveryStream" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecordBatch.html" + action: "firehose:PutRecordBatch" + }] + }] + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ }
+ sematext_metrics: {
+ kind: "sink"
+ title: "Sematext Metrics"
+ classes: {
+ commonly_used: false
+ delivery: "at_least_once"
+ development: "beta"
+ service_providers: ["Sematext"]
+ egress_method: "batch"
+ stateful: false
+ }
+ features: {
+ buffer: {
+ enabled: true
+ }
+ healthcheck: {
+ enabled: true
+ }
+ send: {
+ batch: {
+ enabled: true
+ common: false
+ max_bytes: 30000000
+ max_events: null
+ timeout_secs: 1
+ }
+ compression: {
+ enabled: false
+ }
+ encoding: {
+ enabled: true
+ codec: {
+ enabled: false
+ }
+ }
+ request: {
+ enabled: false
+ }
+ tls: {
+ enabled: false
+ }
+ to: {
+ service: {
+ name: "Sematext"
+ thing: "a Sematext account"
+ url: "https://sematext.com"
+ versions: null
+ description: "[Sematext](https://sematext.com) is a hosted monitoring platform based on Elasticsearch, providing powerful monitoring and management solutions to monitor and observe your apps in real time."
+ }
+ interface: {
+ socket: {
+ direction: "outgoing"
+ protocols: ["http"]
+ ssl: "required"
+ }
+ }
+ }
+ }
+ descriptions: {
+ buffer: "Buffers data in-memory or on-disk for performance and durability."
+ compress: "Compresses data to optimize bandwidth."
+ request: "Automatically retries failed requests, with backoff."
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)."
+ batch: "Batches data to maximize throughput."
+ }
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msv": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: []
+ warnings: ["""
+ [Sematext monitoring](https://sematext.com/docs/monitoring/) only accepts metrics that contain a single value.
+ Therefore, only `counter` and `gauge` metrics are supported. If you'd like to ingest other
+ metric types, please consider using the [`metric_to_log` transform][docs.transforms.metric_to_log]
+ with the `sematext_logs` sink.
+ """]
+ notices: []
+ }
+ configuration: {
+ endpoint: {
+ common: false
+ description: "The endpoint to send data to."
+ name: "endpoint"
+ relevant_when: "region is not set"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["http://127.0.0.1", "http://example.com"]
+ syntax: "literal"
+ }
+ }
+ }
+ region: {
+ description: "The region to send data to."
+ name: "region"
+ required: true
+ relevant_when: "endpoint is not set"
+ warnings: []
+ type: {
+ string: {
+ enum: {
+ us: "United States"
+ eu: "Europe"
+ }
+ examples: ["us", "eu"]
+ syntax: "literal"
+ }
+ }
+ }
+ default_namespace: {
+ description: "Used as a namespace for metrics that don't have one."
+ name: "default_namespace"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["service"]
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
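+
+ For example (a sketch; the IDs are the illustrative ones used
+ throughout this reference):
+
+ ```toml title="vector.toml"
+ [sinks.my-sink]
+ inputs = ["my-source-or-transform-id", "prefix-*"]
+ ```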
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 30000000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." 
+ name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + token: { + description: "The token that will be used to write to Sematext." + name: "token" + required: true + warnings: [] + type: { + string: { + examples: ["${SEMATEXT_TOKEN}", "some-sematext-token"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + sematext_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: false + metrics: { + counter: true + distribution: false + gauge: true + histogram: false + set: false + summary: false + } + } + env_vars: {} + type: "sematext_metrics" + #ExampleConfig: { + title: string + configuration: { + endpoint: null + region: null + token: null + default_namespace: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + encode_errors_total: { + description: "The total number of errors encountered when encoding an event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "encode_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error." + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operation failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink, you can set the `healthcheck` option to + `false`.
+ """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + } + } + honeycomb: { + kind: "sink" + title: "Honeycomb" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["Honeycomb"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 5242880 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "Honeycomb" + thing: "a Honeycomb dataset" + url: "https://honeycomb.io" + versions: null + description: "[Honeycomb](https://honeycomb.io) provides full stack observability—designed for high cardinality data and collaborative problem solving, enabling engineers to deeply understand and debug production software together." + } + interface: { + socket: { + api: { + title: "Honeycomb batch events API" + url: "https://docs.honeycomb.io/api/events/#batched-events" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + api_key: { + description: "The team key that will be used to authenticate against Honeycomb." + name: "api_key" + required: true + warnings: [] + type: { + string: { + examples: ["${HONEYCOMB_API_KEY}", "some-api-key"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 5242880 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." 
+ name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." 
+ name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + dataset: { + description: "The dataset that Vector will send logs to." + name: "dataset" + required: true + warnings: [] + type: { + string: { + examples: ["my-honeycomb-dataset"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + honeycomb: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "honeycomb" + #ExampleConfig: { + title: string + configuration: { + api_key: null + dataset: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + setup: { + #Subsection: { + title: string + body: string + } + name: "setup" + title: "Setup" + body: """ + 1. Register for a free account at [honeycomb.io](https://ui.honeycomb.io/signup) + + 2. Once registered, create a new dataset and when presented with log shippers select the + curl option and use the key provided with the curl example. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. 
+ """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + gcp_stackdriver_logs: { + kind: "sink" + title: "GCP Operations (formerly Stackdrive) Logs" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["GCP"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 5242880 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 1000 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "GCP Operations (formerly Stackdriver) logs" + thing: "a GCP Operations (formerly Stackdriver) logs account" + url: "https://cloud.google.com/logging/docs/reference/v2/rest/" + versions: null + description: "[Stackdriver][urls.gcp_stackdriver] is Google Cloud's embedded observability suite designed to monitor, troubleshoot, and improve cloud infrastructure, software and application performance. Stackdriver enables you to efficiently build and run workloads, keeping applications available and performing well." 
+ } + interface: { + socket: { + api: { + title: "REST Interface" + url: "https://cloud.google.com/logging/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + billing_account_id: { + common: false + description: """ + The billing account ID to which to publish logs. + + Exactly one of `billing_account_id`, `folder_id`, `organization_id`, or `project_id` must be set. + """ + name: "billing_account_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["012345-6789AB-CDEF01"] + syntax: "literal" + } + } + } + credentials_path: { + common: true + description: """ + The filename for a Google Cloud service account credentials JSON file used to authenticate access to the Stackdriver Logging API. If this is unset, Vector checks the `GOOGLE_APPLICATION_CREDENTIALS` environment variable for a filename. + + If no filename is named, Vector will attempt to fetch an instance service account for the compute instance the program is running on. If Vector is not running on a GCE instance, you must define a credentials file as above. + """ + name: "credentials_path" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + } + folder_id: { + common: false + description: """ + The folder ID to which to publish logs. + See the [Google Cloud Platform folder documentation][urls.gcp_folders] for more details. + + Exactly one of `billing_account_id`, `folder_id`, `organization_id`, or `project_id` must be set. + """ + name: "folder_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["My Folder"] + syntax: "literal" + } + } + } + log_id: { + description: "The log ID to which to publish logs. This is a name you create to identify this log stream." + name: "log_id" + required: true + warnings: [] + type: { + string: { + examples: ["vector-logs"] + syntax: "literal" + } + } + } + organization_id: { + common: false + description: """ + The organization ID to which to publish logs. This would be the identifier assigned to your organization on Google Cloud Platform. + + Exactly one of `billing_account_id`, `folder_id`, `organization_id`, or `project_id` must be set. + """ + name: "organization_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["622418129737"] + syntax: "literal" + } + } + } + project_id: { + description: """ + The project ID to which to publish logs. See the [Google Cloud Platform project management documentation][urls.gcp_projects] for more details. + + Exactly one of `billing_account_id`, `folder_id`, `organization_id`, or `project_id` must be set. 
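+ + For example, an abbreviated sketch (not a complete configuration) that picks `project_id` as its one destination field, using the placeholder IDs from the examples above: + + ```toml title="vector.toml" + [sinks.my-sink] + type = "gcp_stackdriver_logs" + project_id = "vector-123456" + log_id = "vector-logs" + ```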
+ """ + name: "project_id" + required: true + warnings: [] + type: { + string: { + examples: ["vector-123456"] + syntax: "literal" + } + } + } + resource: { + category: "Resource" + description: "Options for describing the logging resource." + name: "resource" + required: true + warnings: [] + type: { + object: { + examples: [{ + type: "global" + projectId: "vector-123456" + instanceId: "Twilight" + zone: "us-central1-a" + }] + options: { + type: { + description: """ + The monitored resource type. For example, the type of a Compute Engine VM instance is gce_instance. + + See the [Google Cloud Platform monitored resource documentation][urls.gcp_resources] for more details. + """ + name: "type" + required: true + warnings: [] + type: { + string: { + examples: ["global", "gce_instance"] + syntax: "literal" + } + } + } + "*": { + common: false + description: """ + Values for all of the labels listed in the associated monitored resource descriptor. + + For example, Compute Engine VM instances use the labels `projectId`, `instanceId`, and `zone`. + """ + name: "*" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["vector-123456", "Twilight"] + syntax: "literal" + } + } + } + } + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." 
+ } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 5242880 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Limits the sink to encoding only the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT.
This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once the first retry has failed, the Fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for outgoing connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format."
+ name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + severity_key: { + common: false + description: """ + The field of the log event from which to take the outgoing log's `severity` field. The named field is removed from the log event if present, and must be either an integer between 0 and 800 or a string containing one of the [severity level names][urls.gcp_stackdriver_severity] (case is ignored) or a common prefix such as `err`. This could be added by an [`add_fields` transform][docs.transforms.add_fields] or extracted from a field from the source. + + If no severity key is specified, the severity of outgoing records will be set to 0 (`DEFAULT`). + + See the [GCP Stackdriver Logging LogSeverity description][urls.gcp_stackdriver_severity] for more details on the value of the `severity` field. + """ + name: "severity_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["severity"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + gcp_stackdriver_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: { + GOOGLE_APPLICATION_CREDENTIALS: { + description: "The filename for a Google Cloud service account credentials JSON file used for authentication." 
+ name: "GOOGLE_APPLICATION_CREDENTIALS" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "gcp_stackdriver_logs" + #ExampleConfig: { + title: string + configuration: { + billing_account_id: null + credentials_path: null + folder_id: null + log_id: null + organization_id: null + project_id: null + resource: null + severity_key: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + severity_level_mapping: { + #Subsection: { + title: string + body: string + } + name: "severity_level_mapping" + title: "Severity Level Mapping" + body: """ + If a `severity_key` is configured, outgoing log records will have their + `severity` header field set from the named field in the Vector + event. However, the [required values][urls.gcp_stackdriver_severity] for + this field may be inconvenient to produce, typically requiring a custom + mapping using an additional transform. To assist with this, this sink + remaps certain commonly used words to the required numbers as in the + following table. Note that only the prefix is compared, such that a + value of `emergency` matches `emerg`, and the comparison ignores case. + + | Prefix | Value + |:-------|:----- + | emerg | 800 + | fatal | 800 + | alert | 700 + | crit | 600 + | err | 500 + | warn | 400 + | notice | 300 + | info | 200 + | debug | 100 + | trace | 100 + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + gcp_authentication: { + #Subsection: { + title: string + body: string + } + name: "gcp_authentication" + title: "GCP Authentication" + body: """ + GCP offers a [variety of authentication methods](https://cloud.google.com/docs/authentication/) and + Vector is concerned with the [server to server methods](https://cloud.google.com/docs/authentication/production) + and will find credentials in the following order: + + 1. If the [`credentials_path`](#credentials_path) option is set. + 1. If the `api_key` option is set. + 1. If the [`GOOGLE_APPLICATION_CREDENTIALS`](#google_application_credentials) envrionment variable is set. + 1. Finally, Vector will check for an [instance service account](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually). + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. 
+ """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. 
+ """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "\(_service).\(_action)" + } + platform: "gcp" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck", "write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "logging.logEntries.create" + }] + }] + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + logdna: { + kind: "sink" + title: "LogDNA" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["LogDNA"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 10490000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 5 + rate_limit_duration_secs: 1 + rate_limit_num: 5 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "LogDNA" + thing: "a LogDNA account" + url: "https://logdna.com/" + versions: null + description: "[LogDNA](https://logdna.com/) is a log management system that allows engineering and DevOps to aggregate all system, server, and application logs into one platform. Collect, monitor, store, tail, and search application logs in with one command-line or web interface." + } + interface: { + socket: { + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + api_key: { + description: "The Ingestion API key." + name: "api_key" + required: true + warnings: [] + type: { + string: { + examples: ["${LOGDNA_API_KEY}", "ef8d5de700e7989468166c40fc8a0ccd"] + syntax: "literal" + } + } + } + default_app: { + common: false + description: "The default app that will be set for events that do not contain a `file` or `app` field." + name: "default_app" + required: false + warnings: [] + type: { + string: { + default: "vector" + examples: ["vector", "myapp"] + syntax: "literal" + } + } + } + default_env: { + common: false + description: "The default environment that will be set for events that do not contain an `env` field." + name: "default_env" + required: false + warnings: [] + type: { + string: { + default: "production" + examples: ["staging", "production"] + syntax: "literal" + } + } + } + endpoint: { + common: false + description: "The endpoint to send logs to." + name: "endpoint" + required: false + warnings: [] + type: { + string: { + default: "https://logs.logdna.com/logs/ingest" + examples: ["http://127.0.0.1", "http://example.com"] + syntax: "literal" + } + } + } + hostname: { + description: "The hostname that will be attached to each batch of events." 
+ name: "hostname" + required: true + warnings: [] + type: { + string: { + examples: ["${HOSTNAME}", "my-local-machine"] + syntax: "literal" + } + } + } + ip: { + common: false + description: "The IP address that will be attached to each batch of events." + name: "ip" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["0.0.0.0"] + syntax: "literal" + } + } + } + mac: { + common: false + description: "The mac address that will be attached to each batch of events." + name: "mac" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["my-mac-address"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 10490000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." 
+ name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. 
Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tags: { + common: false + description: "The tags that will be attached to each batch of events." + name: "tags" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["tag1", "tag2"] + syntax: "literal" + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + logdna: "The type of this component." 
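To make the `encoding.*` and `tags` options above concrete, a hedged sketch using the same hypothetical sink ID and the documented example values:

```toml title="vector.toml"
[sinks.my_logdna_sink]
  # Drop these fields before encoding; `only_fields` is the allow-list dual.
  encoding.except_fields    = ["parent.child"]
  encoding.timestamp_format = "rfc3339"  # the default; "unix" is the alternative
  # Tags attached to each batch of events.
  tags = ["tag1", "tag2"]
```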
+ } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "logdna" + #ExampleConfig: { + title: string + configuration: { + api_key: null + default_app: null + default_env: null + endpoint: null + hostname: null + ip: null + mac: null + tags: null + type: null + inputs: null + buffer: null + batch: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. 
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured `max_size` or `max_events`. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts`, + `request.retry_initial_backoff_secs`, and `request.retry_max_duration_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID."
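The buffers & batches description above corresponds to the `buffer.*` and `batch.*` options documented earlier. A sketch with the same hypothetical sink ID, using documented defaults and examples:

```toml title="vector.toml"
[sinks.my_logdna_sink]
  # Durable on-disk buffer instead of the default in-memory one.
  buffer.type      = "disk"
  buffer.max_size  = 104900000  # bytes; required when type = "disk"
  buffer.when_full = "block"    # apply back pressure rather than drop events
  # Flush a batch at 10490000 bytes or after 1 second, whichever comes first.
  batch.max_bytes    = 10490000
  batch.timeout_secs = 1
```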
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + blackhole: { + kind: "sink" + title: "Blackhole" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: false + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: false + } + request: { + enabled: false + } + tls: { + enabled: false + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + print_amount: { + common: false + description: "The number of events that must be received in order to print a summary of activity." + name: "print_amount" + required: false + warnings: [] + type: { + uint: { + default: 1000 + examples: [1000] + unit: null + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + rate: { + common: false + description: "Rates the amount of events that the sink can consume per second." + name: "rate" + required: false + warnings: [] + type: { + uint: { + default: null + examples: [1000] + unit: null + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + blackhole: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "blackhole" + #ExampleConfig: { + title: string + configuration: { + print_amount: null + rate: null + type: null + inputs: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + processed_bytes_total: { + description: "The total number of bytes processed by the component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." 
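Since the blackhole sink has only a handful of options, a complete configuration is short. A sketch, with hypothetical IDs and the documented default for `print_amount`:

```toml title="vector.toml"
[sinks.my_blackhole_sink]
  type         = "blackhole"
  inputs       = ["my-source-id"]
  print_amount = 1000  # print an activity summary every 1000 events
```

Events routed here are accepted and discarded, which makes this sink a convenient target for throughput testing.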
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_bytes_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + processed_events_total: { + description: "The total number of events processed by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + file: { + name: "file" + description: "The file that produced the error" + required: false + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processed_events_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." 
+ required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + } + } + aws_cloudwatch_metrics: { + kind: "sink" + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + access_key_id: { + category: "Auth" + common: false + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "access_key_id" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + } + secret_access_key: { + category: "Auth" + common: false + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "secret_access_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + } + assume_role: { + category: "Auth" + common: false + description: "The ARN of an [IAM role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to assume at startup." + name: "assume_role" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["arn:aws:iam::123456789098:role/my_role"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + common: false + description: "Custom endpoint for use with AWS-compatible services. Providing a value for this option will make `region` moot." + name: "endpoint" + relevant_when: "region = null" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["127.0.0.0:5000/path/to/service"] + syntax: "literal" + } + } + } + default_namespace: { + description: """ + A [namespace](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Namespace) that will isolate different metrics from each other. + Used as a namespace for metrics that don't have it. + """ + name: "default_namespace" + required: true + warnings: [] + type: { + string: { + examples: ["service"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum size of a batch, in events, before it is flushed." 
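Combining the `auth.*`, `region`, and `default_namespace` options above gives a minimal aws_cloudwatch_metrics configuration. A hedged sketch (hypothetical IDs; the key values are the documented placeholders, and instance profiles are preferable to static keys where available):

```toml title="vector.toml"
[sinks.my_cloudwatch_metrics_sink]
  type              = "aws_cloudwatch_metrics"
  inputs            = ["my-source-id"]
  default_namespace = "service"    # fallback namespace for metrics without one
  region            = "us-east-1"  # required unless a custom `endpoint` is set
  # Static credentials; omit these to fall back to env vars, config files,
  # or the instance profile (see the AWS Authentication section below).
  auth.access_key_id     = "AKIAIOSFODNN7EXAMPLE"
  auth.secret_access_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
```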
+ name: "max_events" + required: false + warnings: [] + type: { + uint: { + default: 20 + unit: "events" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + region: { + description: "The [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) of the target service. If `endpoint` is provided it will override this value since the endpoint includes the region." + name: "region" + required: true + relevant_when: "endpoint = null" + warnings: [] + type: { + string: { + examples: ["us-east-1"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + aws_cloudwatch_metrics: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: { + AWS_ACCESS_KEY_ID: { + description: "The AWS access key id. Used for AWS authentication when communicating with AWS services." + name: "AWS_ACCESS_KEY_ID" + common: true + type: { + string: { + default: null + examples: ["AKIAIOSFODNN7EXAMPLE"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CONFIG_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store configuration profiles." + name: "AWS_CONFIG_FILE" + common: true + type: { + string: { + default: "~/.aws/config" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_CREDENTIAL_EXPIRATION: { + description: "Expiration time in RFC 3339 format. If unset, credentials won't expire." + name: "AWS_CREDENTIAL_EXPIRATION" + common: true + type: { + string: { + default: null + examples: ["1996-12-19T16:39:57-08:00"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_DEFAULT_REGION: { + description: "The default [AWS region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)." + name: "AWS_DEFAULT_REGION" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_PROFILE: { + description: "Specifies the name of the CLI profile with the credentials and options to use. This can be the name of a profile stored in a credentials or config file." 
+ name: "AWS_PROFILE" + common: true + type: { + string: { + default: "default" + examples: ["my-custom-profile"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_ROLE_SESSION_NAME: { + description: "Specifies a name to associate with the role session. This value appears in CloudTrail logs for commands performed by the user of this profile." + name: "AWS_ROLE_SESSION_NAME" + common: true + type: { + string: { + default: null + examples: ["vector-session"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SECRET_ACCESS_KEY: { + description: "The AWS secret access key. Used for AWS authentication when communicating with AWS services." + name: "AWS_SECRET_ACCESS_KEY" + common: true + type: { + string: { + default: null + examples: ["wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"] + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SHARED_CREDENTIALS_FILE: { + description: "Specifies the location of the file that the AWS CLI uses to store access keys." + name: "AWS_SHARED_CREDENTIALS_FILE" + common: true + type: { + string: { + default: "~/.aws/credentials" + syntax: "literal" + } + } + required: false + warnings: [] + } + AWS_SESSION_TOKEN: { + description: "The AWS session token. Used for AWS authentication when communicating with AWS services." + name: "AWS_SESSION_TOKEN" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "aws_cloudwatch_metrics" + how_it_works: { + aws_authentication: { + #Subsection: { + title: string + body: string + } + name: "aws_authentication" + title: "AWS Authentication" + body: """ + Vector checks for AWS credentials in the following order: + + 1. Options [`access_key_id`](#access_key_id) and [`secret_access_key`](#secret_access_key). + 2. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + 3. The [`credential_process` command](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html) in the AWS config file. (usually located at `~/.aws/config`) + 4. The [AWS credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). (usually located at `~/.aws/credentials`) + 5. The [IAM instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html). (will only work if running on an EC2 instance with an instance profile/role) + + If credentials are not found the [healtcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + sub_sections: [{ + title: "Obtaining an access key" + body: """ + In general, we recommend using instance profiles/roles whenever possible. In + cases where this is not possible you can generate an AWS access key for any user + within your AWS account. AWS provides a [detailed guide](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) on + how to do this. Such created AWS access keys can be used via [`access_key_id`](#access_key_id) + and [`secret_access_key`](#secret_access_key) options. + """ + }, { + title: "Assuming roles" + body: """ + Vector can assume an AWS IAM role via the [`assume_role`](#assume_role) option. This is an + optional setting that is helpful for a variety of use cases, such as cross + account access. 
+ """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + } + title: "AWS Cloudwatch Metrics" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["AWS"] + stateful: false + } + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + region: null + default_namespace: null + type: null + inputs: null + batch: null + compression: null + healthcheck: null + } + input: {} | {} | [{} | {}] + output: string + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: null + max_events: 20 + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: false + } + request: { + enabled: false + } + tls: { + enabled: false + } + to: { + service: { + name: "AWS Cloudwatch metrics" + thing: "an AWS Cloudwatch metrics namespace" + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/working_with_metrics.html" + versions: null + description: "[Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and management service that provides data and actionable insights for AWS, hybrid, and on-premises applications, and infrastructure resources. With CloudWatch, you can collect and access all your performance and operational data in form of logs and metrics from a single platform." + } + interface: { + socket: { + api: { + title: "AWS Cloudwatch metrics API" + url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + Gauge values are persisted between flushes. 
On Vector start up each + gauge is assumed to have zero, 0.0, value, that can be updated + explicitly by the consequent absolute, not delta, gauge observation, + or by delta increments/decrements. Delta gauges are considered an + advanced feature useful in a distributed setting, however they + should be used with care. + """] + notices: [""" + CloudWatch Metrics types are organized not by their semantics, but + by storage properties: + + * Statistic Sets + * Data Points + + In Vector only the latter is used to allow lossless statistics + calculations on CloudWatch side. + """] + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: false + set: false + summary: false + } + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: !="" & "https://docs.aws.amazon.com/\(_docs_tag)/latest/APIReference/API_\(_action).html" + action: "\(_service):\(_action)" + } + platform: "aws" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck", "write"] + docs_url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html" + action: "cloudwatch:PutMetricData" + }] + }] + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." 
+ required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + file: { + kind: "sink" + title: "File" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: true + default: "none" + algorithms: ["none", "gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["ndjson", "text"] + } + } + request: { + enabled: false + } + tls: { + enabled: false + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + idle_timeout_secs: { + common: false + description: """ + The amount of time a file can be idle and stay open. After not receiving any events for this timeout, the file will be flushed and closed. + + """ + name: "idle_timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: null + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["ndjson", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + path: { + description: "File name to write events to." + name: "path" + required: true + warnings: [] + type: { + string: { + examples: ["/tmp/vector-%Y-%m-%d.log", "/tmp/application-{{ application_id }}-%Y-%m-%d.log"] + syntax: "template" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + file: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "file" + #ExampleConfig: { + title: string + configuration: { + idle_timeout_secs: null + path: null + type: null + inputs: null + compression: null + encoding: null + healthcheck: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + dir_and_file_creation: { + #Subsection: { + title: string + body: string + } + name: "dir_and_file_creation" + title: "File & Directory Creation" + body: """ + Vector will attempt to create the entire directory structure + and the file when emitting events to the file sink. This + requires that the Vector agent have the correct permissions + to create and write to files in the specified directories. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + new_relic_logs: { + kind: "sink" + title: "New Relic Logs" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "stable" + egress_method: "batch" + service_providers: ["New Relic"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 5240000 + max_events: null + timeout_secs: 1 + } + compression: { + enabled: true + default: "none" + algorithms: ["gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 100 + rate_limit_duration_secs: 1 + rate_limit_num: 100 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 30 + headers: false + } + tls: { + enabled: false + } + to: { + service: { + name: "New Relic logs" + thing: "a New Relic logs account" + url: "https://newrelic.com/" + versions: null + description: "[New Relic][urls.new_relic] is a San Francisco, California-based technology company which develops cloud-based software to help website and application owners track the performances of their services." + } + interface: { + socket: { + api: { + title: "New Relic Log API" + url: "https://docs.newrelic.com/docs/logs/new-relic-logs/log-api/introduction-log-api" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." 
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + insert_key: { + common: true + description: "Your New Relic insert key (if applicable)." + name: "insert_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["xxxx", "${NEW_RELIC_INSERT_KEY}"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." 
+ name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 5240000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. 
This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 100 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 100 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + license_key: { + common: true + description: "Your New Relic license key (if applicable)." + name: "license_key" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["xxxx", "${NEW_RELIC_LICENSE_KEY}"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." 
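+ // Illustrative sketch, not part of the `cue eval` output above: a minimal
+ // TOML configuration exercising the options documented in this section,
+ // assuming a hypothetical sink ID of "my_new_relic_sink":
+ //
+ //   [sinks.my_new_relic_sink]
+ //   type = "new_relic_logs"
+ //   inputs = ["my-source-or-transform-id"]
+ //   license_key = "${NEW_RELIC_LICENSE_KEY}"
+ //   request.concurrency = "adaptive"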
+ name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + new_relic_logs: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "new_relic_logs" + #ExampleConfig: { + title: string + configuration: { + insert_key: null + license_key: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. 
+ + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured `max_bytes` or `max_events`. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_initial_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID."
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + gcp_cloud_storage: { + kind: "sink" + title: "GCP Cloud Storage (GCS)" + classes: { + commonly_used: true + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["GCP"] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + batch: { + enabled: true + common: false + max_bytes: 10485760 + max_events: null + timeout_secs: 300 + } + compression: { + enabled: true + default: "none" + algorithms: ["gzip"] + levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["ndjson", "text"] + } + } + request: { + enabled: true + adaptive_concurrency: true + concurrency: 25 + rate_limit_duration_secs: 1 + rate_limit_num: 1000 + retry_initial_backoff_secs: 1 + retry_max_duration_secs: 10 + timeout_secs: 60 + headers: false + } + tls: { + enabled: true + can_enable: false + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "GCP Cloud Storage" + thing: "a GCP Cloud Storage bucket" + url: "https://cloud.google.com/storage" + versions: null + description: "[Google Cloud Storage][urls.gcp_cloud_storage] is a RESTful online file storage web service for storing and accessing data on Google Cloud Platform infrastructure. The service combines the performance and scalability of Google's cloud with advanced security and sharing capabilities. This makes it a prime candidate for log data." + } + interface: { + socket: { + api: { + title: "GCP XML Interface" + url: "https://cloud.google.com/storage/docs/xml-api/overview" + } + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + batch: "Batches data to maximize throughput." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + acl: { + category: "ACL" + common: false + description: "Predefined ACL to apply to the created objects. For more information, see [Predefined ACLs][urls.gcs_predefined_acl]. If this is not set, GCS will apply a default ACL when the object is created." + name: "acl" + required: false + warnings: [] + type: { + string: { + default: null + enum: { + "authenticated-read": "Gives the bucket or object owner OWNER permission, and gives all authenticated Google account holders READER permission." + "bucket-owner-full-control": "Gives the object and bucket owners OWNER permission." 
+ "bucket-owner-read": "Gives the object owner OWNER permission, and gives the bucket owner READER permission." + private: "Gives the bucket or object owner OWNER permission for a bucket or object." + "project-private": "Gives permission to the project team based on their roles. Anyone who is part of the team has READER permission. Project owners and project editors have OWNER permission. This the default." + "public-read": "Gives the bucket or object owner OWNER permission, and gives all users, both authenticated and anonymous, READER permission. When you apply this to an object, anyone on the Internet can read the object without authenticating." + } + syntax: "literal" + } + } + } + bucket: { + description: "The GCS bucket name." + name: "bucket" + required: true + warnings: [] + type: { + string: { + examples: ["my-bucket"] + syntax: "literal" + } + } + } + credentials_path: { + category: "Auth" + common: true + description: """ + The filename for a Google Cloud service account credentials JSON file used to authenticate access to the Cloud Storage API. If this is unset, Vector checks the `GOOGLE_APPLICATION_CREDENTIALS` environment variable for a filename. + + If no filename is named, Vector will attempt to fetch an instance service account for the compute instance the program is running on. If Vector is not running on a GCE instance, you must define a credentials file as above. + """ + name: "credentials_path" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + } + filename_append_uuid: { + category: "File Naming" + common: false + description: "Whether or not to append a UUID v4 token to the end of the file. This ensures there are no name collisions high volume use cases." + name: "filename_append_uuid" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + filename_extension: { + category: "File Naming" + common: false + description: "The filename extension to use in the object name." + name: "filename_extension" + required: false + warnings: [] + type: { + string: { + default: "log" + syntax: "literal" + } + } + } + filename_time_format: { + category: "File Naming" + common: false + description: "The format of the resulting object file name. [`strftime` specifiers][urls.strptime_specifiers] are supported." + name: "filename_time_format" + required: false + warnings: [] + type: { + string: { + default: "%s" + syntax: "literal" + } + } + } + key_prefix: { + category: "File Naming" + common: true + description: "A prefix to apply to all object key names. This should be used to partition your objects, and it's important to end this value with a `/` if you want this to be the root GCS \"folder\"." + name: "key_prefix" + required: false + warnings: [] + type: { + string: { + default: "date=%F/" + examples: ["date=%F/", "date=%F/hour=%H/", "year=%Y/month=%m/day=%d/", "application_id={{ application_id }}/date=%F/"] + syntax: "template" + } + } + } + metadata: { + common: false + description: "The set of metadata `key:value` pairs for the created objects. See the [GCS custom metadata][urls.gcs_custom_metadata] documentation for more details." + name: "metadata" + required: false + warnings: [] + type: { + string: { + default: null + examples: [] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. 
Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 10485760 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 300 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "none" + enum: { + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." 
+ name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["ndjson", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." 
+ name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 25 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." + name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 1000 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." 
+ name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + storage_class: { + category: "Storage" + common: false + description: "The storage class for the created objects. See [the GCP storage classes][urls.gcs_storage_classes] for more details." + name: "storage_class" + required: false + warnings: [] + type: { + string: { + default: null + enum: { + STANDARD: "Standard Storage is best for data that is frequently accessed and/or stored for only brief periods of time. This is the default." + NEARLINE: "Nearline Storage is a low-cost, highly durable storage service for storing infrequently accessed data." + COLDLINE: "Coldline Storage is a very-low-cost, highly durable storage service for storing infrequently accessed data." + ARCHIVE: "Archive Storage is the lowest-cost, highly durable storage service for data archiving, online backup, and disaster recovery." + } + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + gcp_cloud_storage: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: { + GOOGLE_APPLICATION_CREDENTIALS: { + description: "The filename for a Google Cloud service account credentials JSON file used for authentication." 
+ name: "GOOGLE_APPLICATION_CREDENTIALS" + relevant_when: "endpoint = null" + common: true + type: { + string: { + default: null + examples: ["/path/to/credentials.json"] + syntax: "literal" + } + } + required: false + warnings: [] + } + } + type: "gcp_cloud_storage" + #ExampleConfig: { + title: string + configuration: { + acl: null + bucket: null + credentials_path: null + filename_append_uuid: null + filename_extension: null + filename_time_format: null + key_prefix: null + metadata: null + storage_class: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + how_it_works: { + object_access_control_list: { + #Subsection: { + title: string + body: string + } + name: "object_access_control_list" + title: "Object access control list (ACL)" + body: """ + GCP Cloud Storage supports access control lists (ACL) for buckets and + objects. In the context of Vector, only object ACLs are relevant (Vector + does not create or modify buckets). You can set the object level ACL by + using the `acl` option, which allows you to set one of the [predefined + ACLs](https://cloud.google.com/storage/docs/access-control/lists#predefined-acl) on each created object. + """ + } + object_naming: { + #Subsection: { + title: string + body: string + } + name: "object_naming" + title: "Object Naming" + body: """ + By default, Vector will name your GCS objects in the following format: + + + + + + ```text + -.log + ``` + + For example: + + ```text + date=2019-06-18/1560886634-fddd7a0e-fad9-4f7e-9bce-00ae5debc563.log + ``` + + + + + ```text + -.log.gz + ``` + + For example: + + ```text + date=2019-06-18/1560886634-fddd7a0e-fad9-4f7e-9bce-00ae5debc563.log.gz + ``` + + + + + Vector appends a [UUIDV4][urls.uuidv4] token to ensure there are no name + conflicts in the unlikely event 2 Vector instances are writing data at the same + time. + + You can control the resulting name via the `key_prefix`, `filename_time_format`, + and `filename_append_uuid` options. + """ + } + storage_class: { + #Subsection: { + title: string + body: string + } + name: "storage_class" + title: "Storage Class" + body: """ + GCS offers [storage classes](https://cloud.google.com/storage/docs/storage-classes). You can apply + defaults, and rules, at the bucket level or set the storage class at the + object level. In the context of Vector only the object level is relevant + (Vector does not create or modify buckets). You can set the storage + class via the `storage_class` option. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + gcp_authentication: { + #Subsection: { + title: string + body: string + } + name: "gcp_authentication" + title: "GCP Authentication" + body: """ + GCP offers a [variety of authentication methods](https://cloud.google.com/docs/authentication/) and + Vector is concerned with the [server to server methods](https://cloud.google.com/docs/authentication/production) + and will find credentials in the following order: + + 1. If the [`credentials_path`](#credentials_path) option is set. + 1. If the `api_key` option is set. + 1. If the [`GOOGLE_APPLICATION_CREDENTIALS`](#google_application_credentials) envrionment variable is set. + 1. 
Finally, Vector will check for an [instance service account](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually). + + If credentials are not found, the [healthcheck](#healthchecks) will fail and an + error will be [logged][docs.monitoring#logs]. + """ + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink, you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adaptive concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Request Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. Check out the [announcement blog post](/blog/adaptive-request-concurrency/). + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above.
You'll notice that Vector treats these concepts + differently: instead of treating them as global concepts, Vector treats them + as sink-specific concepts. This isolates sinks, ensuring service disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when one of two conditions is met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured `max_bytes` or `max_events`. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + tags_and_metadata: { + #Subsection: { + title: string + body: string + } + name: "tags_and_metadata" + title: "Tags & Metadata" + body: """ + Vector supports adding [custom metadata](https://cloud.google.com/storage/docs/metadata#custom-metadata) to + created objects. These metadata items are a way of associating extra + data items with the object that are not part of the uploaded data. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_initial_backoff_secs` options. + """ + } + } + permissions: { + iam: [{ + #Policy: { + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "\(_service).\(_action)" + } + platform: "gcp" + policies: [{ + #RequiredFor: "write" | "healthcheck" + required_for: ["write"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "storage.objects.create" + }, { + #RequiredFor: "write" | "healthcheck" + required_for: ["healthcheck"] + docs_url: "https://cloud.google.com/iam/docs/permissions-reference" + action: "storage.objects.get" + }] + }] + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind."
+ required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + statsd: { + kind: "sink" + title: "Statsd" + classes: { + commonly_used: true + delivery: "best_effort" + development: "stable" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: true + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: false + } + } + request: { + enabled: false + } + send_buffer_bytes: { + enabled: true + relevant_when: "mode = `tcp` or mode = `udp` && os = `unix`" + } + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + can_verify_hostname: true + enabled_default: false + } + to: { + service: { + name: "Statsd receiver" + thing: "a Statsd receiver" + url: "https://github.com/statsd/statsd" + versions: null + description: "[StatsD](https://github.com/statsd/statsd) is a standard and, by extension, a set of tools that can be used to send, collect, and aggregate custom metrics from any application. Originally, StatsD referred to a daemon written by [Etsy](https://www.etsy.com) in Node." + } + interface: { + socket: { + direction: "outgoing" + protocols: ["tcp", "udp", "unix"] + ssl: "required" + } + } + } + } + descriptions: { + buffer: "Buffers data in-memory or on-disk for performance and durability." + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + input: { + logs: false + metrics: { + counter: true + distribution: true + gauge: true + histogram: false + set: true + summary: false + } + } + configuration: { + address: { + description: "The address to connect to. The address _must_ include a port." + name: "address" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["92.12.333.224:5000"] + syntax: "literal" + } + } + } + mode: { + description: "The type of socket to use." + name: "mode" + required: true + warnings: [] + type: { + string: { + enum: { + tcp: "TCP socket" + udp: "UDP socket" + unix: "Unix domain socket" + } + examples: ["tcp", "udp", "unix"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. 
+ """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + buffer: { + common: false + category: "Buffer" + description: "Configures the sink specific buffer behavior." + name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["json", "text"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." 
+ name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + keepalive: { + common: false + category: "Keepalive" + description: "Configures the TCP keepalive behavior for the connection to the sink." + name: "keepalive" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + time_secs: { + common: false + description: "The time a connection needs to be idle before sending TCP keepalive probes." + name: "time_secs" + required: false + warnings: [] + type: { + uint: { + default: null + unit: "seconds" + } + } + } + } + } + } + } + tls: { + category: "Tls" + common: false + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + enabled: { + common: true + description: "Enable TLS during connections to the remote." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + verify_certificate: { + common: false + description: "If `true` (the default), Vector will validate the TLS certificate of the remote host." + name: "verify_certificate" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." + name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + path: { + description: "The unix socket path. This should be the absolute path." 
+ name: "path" + relevant_when: "mode = `tcp` or `udp`" + required: true + warnings: [] + type: { + string: { + examples: ["/path/to/socket"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + statsd: "The type of this component." + socket: "The type of this component." + } + syntax: "literal" + } + } + } + default_namespace: { + common: true + description: """ + Used as a namespace for metrics that don't have it. + A namespace will be prefixed to a metric's name. + """ + name: "default_namespace" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["service"] + syntax: "literal" + } + } + } + } + env_vars: {} + type: "statsd" + #ExampleConfig: { + title: string + configuration: { + address: null + mode: null + path: null + type: null + default_namespace: null + inputs: null + buffer: null + encoding: null + healthcheck: null + keepalive: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + processing_errors_total: { + description: "The total number of processing errors encountered by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + error_type: { + name: "error_type" + description: "The type of the error" + required: true + enum: { + field_missing: "The event field was missing." + invalid_metric: "The metric was invalid." + mapping_failed: "The mapping failed." + match_failed: "The match operation failed." + parse_failed: "The parsing operation failed." + render_error: "The rendering operation failed." + type_conversion_failed: "The type conversion operating failed." + type_field_does_not_exist: "The type field does not exist." + type_ip_address_parse_error: "The IP address did not parse." + value_invalid: "The value was invalid." + } + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "processing_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." 
+ required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails, an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink, you can set the `healthcheck` option to + `false`. + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [OpenSSL](https://www.openssl.org/) for TLS protocols because of its + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + } + } + pulsar: { + kind: "sink" + title: "Apache Pulsar" + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "stream" + service_providers: [] + stateful: false + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: true + } + send: { + compression: { + enabled: false + } + encoding: { + enabled: true + codec: { + enabled: true + default: null + enum: ["text", "json"] + } + } + request: { + enabled: false + } + tls: { + enabled: false + } + to: { + service: { + name: "Apache Pulsar" + thing: "an Apache Pulsar cluster" + url: "https://pulsar.apache.org/" + versions: null + description: "[Pulsar](https://pulsar.apache.org/) is a multi-tenant, high-performance solution for server-to-server messaging.
Pulsar was originally developed by Yahoo, it is under the stewardship of the Apache Software Foundation. It is an excellent tool for streaming logs and metrics data." + } + interface: { + socket: { + api: { + title: "Pulsar protocol" + url: "https://pulsar.apache.org/docs/en/develop-binary-protocol/" + } + direction: "outgoing" + protocols: ["http"] + ssl: "disabled" + } + } + } + } + descriptions: { + compress: "Compresses data to optimize bandwidth." + request: "Automatically retries failed requests, with backoff." + tls_send: "Securely transmits data via Transport Layer Security (TLS)." + } + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [] + notices: [] + } + configuration: { + auth: { + common: false + category: "Auth" + description: "Options for the authentication strategy." + name: "auth" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + name: { + common: false + description: "The basic authentication name." + name: "name" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${PULSAR_NAME}", "name123"] + syntax: "literal" + } + } + } + token: { + common: false + description: "The basic authentication password." + name: "token" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${PULSAR_TOKEN}", "123456789"] + syntax: "literal" + } + } + } + } + } + } + } + endpoint: { + description: "Endpoint to which the pulsar client should connect to." + name: "endpoint" + required: true + warnings: [] + type: { + string: { + examples: ["pulsar://127.0.0.1:6650"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + codec: { + description: "The encoding codec used to serialize the events before outputting." + name: "codec" + required: true + warnings: [] + type: { + string: { + examples: ["text", "json"] + syntax: "literal" + } + } + } + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + topic: { + description: "The Pulsar topic name to write events to." + name: "topic" + required: true + warnings: [] + type: { + string: { + examples: ["topic-1234"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + pulsar: "The type of this component." + } + syntax: "literal" + } + } + } + } + input: { + logs: true + metrics: null + } + env_vars: {} + type: "pulsar" + #ExampleConfig: { + title: string + configuration: { + auth: null + endpoint: null + topic: null + type: null + inputs: null + encoding: null + healthcheck: null + } + input: {} | {} | [{} | {}] + output: string + } + telemetry: { + metrics: { + encode_errors_total: { + description: "The total number of errors encountered when encoding an event." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + } + name: "encode_errors_total" + } + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." 
+ required: true
+ examples: ["vector:9598"]
+ }
+ component_kind: {
+ name: "component_kind"
+ description: "The Vector component kind."
+ required: true
+ enum: {
+ sink: "Vector sink components"
+ source: "Vector source components"
+ transform: "Vector transform components"
+ }
+ }
+ component_name: {
+ name: "component_name"
+ description: "The Vector component ID."
+ required: true
+ examples: ["file_source", "splunk_sink"]
+ }
+ job: {
+ name: "job"
+ description: "The name of the job producing Vector metrics."
+ required: true
+ default: "vector"
+ }
+ component_type: {
+ name: "component_type"
+ description: "The Vector component type."
+ required: true
+ examples: ["file", "http", "honeycomb", "splunk_hec"]
+ }
+ }
+ name: "events_out_total"
+ }
+ }
+ }
+ how_it_works: {
+ state: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state"
+ title: "State"
+ body: "This component is stateless, meaning its behavior is consistent across each input."
+ }
+ healthchecks: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "healthchecks"
+ title: "Health checks"
+ body: """
+ Health checks ensure that the downstream service is
+ accessible and ready to accept data. This check is performed
+ upon sink initialization. If the health check fails, an error
+ will be logged and Vector will proceed to start.
+ """
+ sub_sections: [{
+ title: "Require health checks"
+ body: """
+ If you'd like to exit immediately upon a health
+ check failure, you can pass the
+ `--require-healthy` flag:
+
+ ```bash
+ vector --config /etc/vector/vector.toml --require-healthy
+ ```
+ """
+ }, {
+ title: "Disable health checks"
+ body: """
+ If you'd like to disable health checks for this
+ sink you can set the `healthcheck` option to
+ `false`.
+ """
+ }]
+ }
+ }
+ }
+ clickhouse: {
+ kind: "sink"
+ title: "Clickhouse"
+ classes: {
+ commonly_used: true
+ delivery: "at_least_once"
+ development: "beta"
+ egress_method: "batch"
+ service_providers: ["Yandex"]
+ stateful: false
+ }
+ features: {
+ buffer: {
+ enabled: true
+ }
+ healthcheck: {
+ enabled: true
+ }
+ send: {
+ batch: {
+ enabled: true
+ common: false
+ max_bytes: 1049000
+ max_events: null
+ timeout_secs: 1
+ }
+ compression: {
+ enabled: true
+ default: "gzip"
+ algorithms: ["none", "gzip"]
+ levels: ["none", "fast", "default", "best", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ }
+ encoding: {
+ enabled: true
+ codec: {
+ enabled: false
+ }
+ }
+ request: {
+ enabled: true
+ adaptive_concurrency: true
+ concurrency: 5
+ rate_limit_duration_secs: 1
+ rate_limit_num: 5
+ retry_initial_backoff_secs: 1
+ retry_max_duration_secs: 10
+ timeout_secs: 30
+ headers: false
+ }
+ tls: {
+ enabled: true
+ can_enable: false
+ can_verify_certificate: true
+ can_verify_hostname: true
+ enabled_default: false
+ }
+ to: {
+ service: {
+ name: "Clickhouse"
+ thing: "a Clickhouse database"
+ url: "https://clickhouse.yandex/"
+ versions: null
+ description: "[ClickHouse](https://clickhouse.yandex/) is an open-source column-oriented database management system that manages extremely large volumes of data, including non-aggregated data, in a stable and sustainable manner and allows generating custom data reports in real time. The system is linearly scalable and can be scaled up to store and process trillions of rows and petabytes of data. This makes it best-in-class storage for logs and metrics data."
+ }
+ interface: {
+ socket: {
+ api: {
+ title: "Clickhouse HTTP API"
+ url: "https://clickhouse.yandex/docs/en/interfaces/http/"
+ }
+ direction: "outgoing"
+ protocols: ["http"]
+ ssl: "optional"
+ }
+ }
+ }
+ }
+ descriptions: {
+ buffer: "Buffers data in-memory or on-disk for performance and durability."
+ compress: "Compresses data to optimize bandwidth."
+ request: "Automatically retries failed requests, with backoff."
+ tls_send: "Securely transmits data via Transport Layer Security (TLS)."
+ batch: "Batches data to maximize throughput."
+ }
+ }
+ support: {
+ targets: {
+ "aarch64-unknown-linux-gnu": true
+ "aarch64-unknown-linux-musl": true
+ "armv7-unknown-linux-gnueabihf": true
+ "armv7-unknown-linux-musleabihf": true
+ "x86_64-apple-darwin": true
+ "x86_64-pc-windows-msvc": true
+ "x86_64-unknown-linux-gnu": true
+ "x86_64-unknown-linux-musl": true
+ }
+ requirements: ["[Clickhouse](https://clickhouse.yandex/) version `>= 1.1.54378` is required."]
+ warnings: []
+ notices: []
+ }
+ configuration: {
+ auth: {
+ common: false
+ category: "Auth"
+ description: "Configures the authentication strategy."
+ name: "auth"
+ required: false
+ warnings: []
+ type: {
+ object: {
+ examples: []
+ options: {
+ password: {
+ description: "The basic authentication password."
+ name: "password"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["${CLICKHOUSE_PASSWORD}", "password"]
+ syntax: "literal"
+ }
+ }
+ }
+ strategy: {
+ description: "The authentication strategy to use."
+ name: "strategy"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ enum: {
+ basic: "The [basic authentication strategy](https://en.wikipedia.org/wiki/Basic_access_authentication)."
+ bearer: "The bearer token authentication strategy."
+ }
+ examples: ["basic", "bearer"]
+ syntax: "literal"
+ }
+ }
+ }
+ token: {
+ description: "The token to use for bearer authentication."
+ name: "token"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["${API_TOKEN}", "xyz123"]
+ syntax: "literal"
+ }
+ }
+ }
+ user: {
+ description: "The basic authentication user name."
+ name: "user"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["${CLICKHOUSE_USERNAME}", "username"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ database: {
+ common: true
+ description: "The database that contains the table that data will be inserted into."
+ name: "database"
+ required: false
+ warnings: []
+ type: {
+ string: {
+ default: null
+ examples: ["mydatabase"]
+ syntax: "literal"
+ }
+ }
+ }
+ endpoint: {
+ description: "The endpoint of the [Clickhouse](https://clickhouse.yandex/) server."
+ name: "endpoint"
+ required: true
+ warnings: []
+ type: {
+ string: {
+ examples: ["http://localhost:8123"]
+ syntax: "literal"
+ }
+ }
+ }
+ inputs: {
+ description: """
+ A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/)
+ IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID.
+
+ See [configuration](https://vector.dev/docs/configuration/) for more info.
+ """
+ name: "inputs"
+ required: true
+ warnings: []
+ sort: -1
+ type: {
+ array: {
+ items: {
+ type: {
+ string: {
+ examples: ["my-source-or-transform-id", "prefix-*"]
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ }
+ }
+ buffer: {
+ common: false
+ category: "Buffer"
+ description: "Configures sink-specific buffer behavior."
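+ // Editor's note: a hedged sketch (not from the upstream eval output) wiring
+ // the `clickhouse` sink options above into vector.toml; IDs are hypothetical,
+ // values come from the documented examples, and `table` is documented further
+ // below.
+ //
+ //     [sinks.my_clickhouse]
+ //     type = "clickhouse"
+ //     inputs = ["my-source-or-transform-id"]
+ //     endpoint = "http://localhost:8123"
+ //     database = "mydatabase"
+ //     table = "mytable"
+ //     auth.strategy = "basic"
+ //     auth.user = "${CLICKHOUSE_USERNAME}"
+ //     auth.password = "${CLICKHOUSE_PASSWORD}"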
+ name: "buffer" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_events: { + common: true + description: "The maximum number of [events][docs.data-model] allowed in the buffer." + name: "max_events" + required: false + relevant_when: "type = \"memory\"" + warnings: [] + type: { + uint: { + default: 500 + unit: "events" + } + } + } + max_size: { + description: "The maximum size of the buffer on the disk." + name: "max_size" + required: true + relevant_when: "type = \"disk\"" + warnings: [] + type: { + uint: { + examples: [104900000] + unit: "bytes" + } + } + } + type: { + common: true + description: "The buffer's type and storage mechanism." + name: "type" + required: false + warnings: [] + type: { + string: { + default: "memory" + enum: { + memory: "Stores the sink's buffer in memory. This is more performant, but less durable. Data will be lost if Vector is restarted forcefully." + disk: "Stores the sink's buffer on disk. This is less performant, but durable. Data will not be lost between restarts." + } + syntax: "literal" + } + } + } + when_full: { + common: false + description: "The behavior when the buffer becomes full." + name: "when_full" + required: false + warnings: [] + type: { + string: { + default: "block" + enum: { + block: "Applies back pressure when the buffer is full. This prevents data loss, but will cause data to pile up on the edge." + drop_newest: "Drops new data as it's received. This data is lost. This should be used when performance is the highest priority." + } + syntax: "literal" + } + } + } + } + } + } + } + batch: { + common: false + category: "Batch" + description: "Configures the sink batching behavior." + name: "batch" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + max_bytes: { + common: true + description: "The maximum size of a batch, in bytes, before it is flushed." + name: "max_bytes" + required: false + warnings: [] + type: { + uint: { + default: 1049000 + unit: "bytes" + } + } + } + timeout_secs: { + common: true + description: "The maximum age of a batch before it is flushed." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + } + } + } + } + compression: { + common: true + description: "The compression strategy used to compress the encoded event data before transmission." + name: "compression" + required: false + warnings: [] + type: { + string: { + default: "gzip" + enum: { + none: "No compression." + gzip: "[Gzip](https://www.gzip.org/) standard DEFLATE compression." + syntax: "literal" + } + syntax: "literal" + } + } + } + encoding: { + category: "Encoding" + description: "Configures the encoding specific sink behavior." + name: "encoding" + required: true + warnings: [] + type: { + object: { + examples: [] + options: { + except_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." + name: "except_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + only_fields: { + common: false + description: "Prevent the sink from encoding the specified labels." 
+ name: "only_fields" + required: false + warnings: [] + type: { + array: { + default: null + items: { + type: { + string: { + examples: ["message", "parent.child"] + syntax: "field_path" + } + } + } + } + } + } + timestamp_format: { + common: false + description: "How to format event timestamps." + name: "timestamp_format" + required: false + warnings: [] + type: { + string: { + default: "rfc3339" + enum: { + rfc3339: "Formats as a RFC3339 string" + unix: "Formats as a unix timestamp" + } + syntax: "literal" + } + } + } + } + } + } + } + healthcheck: { + common: true + category: "Healthcheck" + description: "Health check options for the sink." + name: "healthcheck" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: "Enables/disables the healthcheck upon Vector boot." + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + } + } + } + } + request: { + common: false + category: "Request" + description: "Configures the sink request behavior." + name: "request" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + adaptive_concurrency: { + common: false + category: "Adaptive_concurrency" + description: "Configure the adaptive concurrency algorithms. These values have been tuned by optimizing simulated results. In general you should not need to adjust these." + name: "adaptive_concurrency" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + decrease_ratio: { + common: false + description: "The fraction of the current value to set the new concurrency limit when decreasing the limit. Valid values are greater than 0 and less than 1. Smaller values cause the algorithm to scale back rapidly when latency increases. Note that the new limit is rounded down after applying this ratio." + name: "decrease_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.9 + } + } + } + ewma_alpha: { + common: false + description: "The adaptive concurrency algorithm uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with the current RTT. This value controls how heavily new measurements are weighted compared to older ones. Valid values are greater than 0 and less than 1. Smaller values cause this reference to adjust more slowly, which may be useful if a service has unusually high response variability." + name: "ewma_alpha" + required: false + warnings: [] + type: { + float: { + default: 0.7 + } + } + } + rtt_threshold_ratio: { + common: false + description: "When comparing the past RTT average to the current measurements, we ignore changes that are less than this ratio higher than the past RTT. Valid values are greater than or equal to 0. Larger values cause the algorithm to ignore larger increases in the RTT." + name: "rtt_threshold_ratio" + required: false + warnings: [] + type: { + float: { + default: 0.05 + } + } + } + } + } + } + } + concurrency: { + common: true + description: "The maximum number of in-flight requests allowed at any given time, or \"adaptive\" to allow Vector to automatically set the limit based on current network and service conditions." + name: "concurrency" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: "requests" + } + } + } + rate_limit_duration_secs: { + common: true + description: "The time window, in seconds, used for the `rate_limit_num` option." 
+ name: "rate_limit_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + rate_limit_num: { + common: true + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + name: "rate_limit_num" + required: false + warnings: [] + type: { + uint: { + default: 5 + unit: null + } + } + } + retry_attempts: { + common: false + description: "The maximum number of retries to make for failed requests. The default, for all intents and purposes, represents an infinite number of retries." + name: "retry_attempts" + required: false + warnings: [] + type: { + uint: { + default: 18446744073709552000 + unit: null + } + } + } + retry_initial_backoff_secs: { + common: false + description: "The amount of time to wait before attempting the first retry for a failed request. Once, the first retry has failed the fibonacci sequence will be used to select future backoffs." + name: "retry_initial_backoff_secs" + required: false + warnings: [] + type: { + uint: { + default: 1 + unit: "seconds" + } + } + } + retry_max_duration_secs: { + common: false + description: "The maximum amount of time, in seconds, to wait between retries." + name: "retry_max_duration_secs" + required: false + warnings: [] + type: { + uint: { + default: 10 + unit: "seconds" + } + } + } + timeout_secs: { + common: true + description: "The maximum time a request can take before being aborted. It is highly recommended that you do not lower this value below the service's internal timeout, as this could create orphaned requests, pile on retries, and result in duplicate data downstream." + name: "timeout_secs" + required: false + warnings: [] + type: { + uint: { + default: 30 + unit: "seconds" + } + } + } + } + } + } + } + tls: { + common: false + category: "Tls" + description: "Configures the TLS options for incoming connections." + name: "tls" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + ca_file: { + common: false + description: "Absolute path to an additional CA certificate file, in DER or PEM format (X.509), or an inline CA certificate in PEM format." + name: "ca_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/certificate_authority.crt"] + syntax: "literal" + } + } + } + crt_file: { + common: true + description: "Absolute path to a certificate file used to identify this connection, in DER or PEM format (X.509) or PKCS#12, or an inline certificate in PEM format. If this is set and is not a PKCS#12 archive, `key_file` must also be set." + name: "crt_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.crt"] + syntax: "literal" + } + } + } + key_file: { + common: true + description: "Absolute path to a private key file used to identify this connection, in DER or PEM format (PKCS#8), or an inline private key in PEM format. If this is set, `crt_file` must also be set." + name: "key_file" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["/path/to/host_certificate.key"] + syntax: "literal" + } + } + } + verify_hostname: { + common: false + description: "If `true` (the default), Vector will validate the configured remote host name against the remote host's TLS certificate. Do NOT set this to `false` unless you understand the risks of not verifying the remote hostname." 
+ name: "verify_hostname" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + key_pass: { + common: false + description: "Pass phrase used to unlock the encrypted key file. This has no effect unless `key_file` is set." + name: "key_pass" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["${KEY_PASS_ENV_VAR}", "PassWord1"] + syntax: "literal" + } + } + } + } + } + } + } + table: { + description: "The table that data will be inserted into." + name: "table" + required: true + warnings: [] + type: { + string: { + examples: ["mytable"] + syntax: "literal" + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + clickhouse: "The type of this component." + } + syntax: "literal" + } + } + } + } + env_vars: {} + type: "clickhouse" + #ExampleConfig: { + title: string + configuration: { + auth: null + database: null + endpoint: null + table: null + type: null + inputs: null + buffer: null + batch: null + compression: null + encoding: null + healthcheck: null + request: null + tls: null + } + input: {} | {} | [{} | {}] + output: string + } + input: { + logs: true + metrics: null + } + how_it_works: { + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + healthchecks: { + #Subsection: { + title: string + body: string + } + name: "healthchecks" + title: "Health checks" + body: """ + Health checks ensure that the downstream service is + accessible and ready to accept data. This check is performed + upon sink initialization. If the health check fails an error + will be logged and Vector will proceed to start. + """ + sub_sections: [{ + title: "Require health checks" + body: """ + If you'd like to exit immediately upon a health + check failure, you can pass the + `--require-healthy` flag: + + ```bash + vector --config /etc/vector/vector.toml --require-healthy + ``` + """ + }, { + title: "Disable health checks" + body: """ + If you'd like to disable health checks for this + sink you can set the `healthcheck` option to + `false`. + """ + }] + } + partitioning: { + #Subsection: { + title: string + body: string + } + name: "partitioning" + title: "Partitioning" + body: """ + Vector supports dynamic configuration values through a simple + template syntax. If an option supports templating, it will be + noted with a badge and you can use event fields to create dynamic + values. For example: + + ```toml title="vector.toml" + [sinks.my-sink] + \tdynamic_option = "application={{ application_id }}" + ``` + + In the above example, the `application_id` for each event will be + used to partition outgoing data. + """ + } + rate_limits: { + #Subsection: { + title: string + body: string + } + name: "rate_limits" + title: "Rate limits & adapative concurrency" + body: null + sub_sections: [{ + title: "Adaptive Request Concurrency (ARC)" + body: """ + Adaptive Requst Concurrency is a feature of Vector that does away + with static rate limits and automatically optimizes HTTP + concurrency limits based on downstream service responses. The + underlying mechanism is a feedback loop inspired by TCP congestion + control algorithms. 
Checkout the [announcement blog post](/blog/adaptive-request-concurrency/), + + We highly recommend enabling this feature as it improves + performance and reliability of Vector and the systems it + communicates with. + + To enable, set the `request.concurrency` option to `adaptive`: + + ```toml title="vector.toml" + [sinks.my-sink] + request.concurrency = "adaptive" + ``` + """ + }, { + title: "Static rate limits" + body: """ + If Adaptive Request Concurrency is not for you, you can manually + set static rate limits with the `request.rate_limit_duration_secs`, + `request.rate_limit_num`, and `request.concurrency` options: + + ```toml title="vector.toml" + [sinks.my-sink] + request.rate_limit_duration_secs = 1 + request.rate_limit_num = 10 + request.concurrency = 10 + ``` + """ + }] + } + transport_layer_security: { + #Subsection: { + title: string + body: string + } + name: "transport_layer_security" + title: "Transport Layer Security (TLS)" + body: """ + Vector uses [Openssl](https://www.openssl.org/) for TLS protocols for it's + maturity. You can enable and adjust TLS behavior via the `tls.*` + options. + """ + } + buffers_batches: { + #Subsection: { + title: string + body: string + } + name: "buffers_batches" + title: "Buffers & batches" + body: """ + + + This component buffers & batches data as shown in the diagram above. You'll notice that Vector treats these concepts + differently, instead of treating them as global concepts, Vector treats them + as sink specific concepts. This isolates sinks, ensuring services disruptions + are contained and delivery guarantees are honored. + + *Batches* are flushed when 1 of 2 conditions are met: + + 1. The batch age meets or exceeds the configured `timeout_secs`. + 2. The batch size meets or exceeds the configured <% if component.options.batch.children.respond_to?(:max_size) %>`max_size`<% else %>`max_events`<% end %>. + + *Buffers* are controlled via the [`buffer.*`](#buffer) options. + """ + } + retry_policy: { + #Subsection: { + title: string + body: string + } + name: "retry_policy" + title: "Retry policy" + body: """ + Vector will retry failed requests (status == 429, >= 500, and != 501). + Other responses will not be retried. You can control the number of + retry attempts and backoff rate with the `request.retry_attempts` and + `request.retry_backoff_secs` options. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." 
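+ // Editor's note: a hedged sketch of the retry options referenced in the retry
+ // policy above, shown with their documented defaults; the sink ID is
+ // hypothetical.
+ //
+ //     [sinks.my_clickhouse.request]
+ //     retry_initial_backoff_secs = 1  # first retry after 1s, then Fibonacci backoff
+ //     retry_max_duration_secs = 10    # cap on the wait between retries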
+ type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + prometheus_exporter: { + kind: "sink" + title: "Prometheus Exporter" + alias: "prometheus" + classes: { + commonly_used: true + delivery: "best_effort" + development: "stable" + egress_method: "expose" + service_providers: [] + stateful: true + } + features: { + buffer: { + enabled: false + } + healthcheck: { + enabled: false + } + exposes: { + tls: { + enabled: true + can_enable: true + can_verify_certificate: true + enabled_default: false + } + for: { + service: { + name: "Prometheus" + thing: "a Prometheus database" + url: "https://prometheus.io/" + versions: null + description: "[Prometheus](https://prometheus.io/) is a pull-based monitoring system that scrapes metrics from configured endpoints, stores them efficiently, and supports a powerful query language to compose dynamic information from a variety of otherwise unrelated data points." + } + interface: { + socket: { + api: { + title: "Prometheus text exposition format" + url: "https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format" + } + direction: "incoming" + port: 9598 + protocols: ["http"] + ssl: "disabled" + } + } + } + } + descriptions: {} + } + support: { + targets: { + "aarch64-unknown-linux-gnu": true + "aarch64-unknown-linux-musl": true + "armv7-unknown-linux-gnueabihf": true + "armv7-unknown-linux-musleabihf": true + "x86_64-apple-darwin": true + "x86_64-pc-windows-msv": true + "x86_64-unknown-linux-gnu": true + "x86_64-unknown-linux-musl": true + } + requirements: [] + warnings: [""" + High cardinality metric names and labels are discouraged by + Prometheus as they can provide performance and reliability + problems. You should consider alternative strategies to reduce + the cardinality. Vector offers a [`tag_cardinality_limit` transform][docs.transforms.tag_cardinality_limit] + as a way to protect against this. + """] + notices: [] + } + configuration: { + address: { + description: "The address to expose for scraping." + name: "address" + required: true + warnings: [] + type: { + string: { + examples: ["0.0.0.0:9598"] + syntax: "literal" + } + } + } + buckets: { + common: false + description: "Default buckets to use for aggregating [distribution][docs.data-model.metric#distribution] metrics into histograms." 
+ name: "buckets" + required: false + warnings: [] + type: { + array: { + default: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0] + items: { + type: { + float: { + examples: [0.005, 0.01] + } + } + } + } + } + } + flush_period_secs: { + common: false + description: "Time interval between [set][docs.data-model.metric#set] values are reset." + name: "flush_period_secs" + required: false + warnings: [] + type: { + uint: { + default: 60 + unit: "seconds" + } + } + } + default_namespace: { + common: true + description: """ + Used as a namespace for metrics that don't have it. Typically + namespaces are set during ingestion (sources), but it is + optional and when missing, we'll use this value. It should + follow Prometheus [naming conventions](https://prometheus.io/docs/practices/naming/#metric-names). + """ + name: "default_namespace" + required: false + warnings: [] + type: { + string: { + default: null + examples: ["service"] + syntax: "literal" + } + } + } + inputs: { + description: """ + A list of upstream [source](https://vector.dev/docs/reference/sources/) or [transform](https://vector.dev/docs/reference/transforms/) + IDs. Wildcards (`*`) are supported but _must_ be the last character in the ID. + + See [configuration](https://vector.dev/docs/configuration/) for more info. + """ + name: "inputs" + required: true + warnings: [] + sort: -1 + type: { + array: { + items: { + type: { + string: { + examples: ["my-source-or-transform-id", "prefix-*"] + syntax: "literal" + } + } + } + } + } + } + quantiles: { + common: false + description: "Quantiles to use for aggregating [distribution][docs.data-model.metric#distribution] metrics into a summary." + name: "quantiles" + required: false + warnings: [] + type: { + array: { + default: [0.5, 0.75, 0.9, 0.95, 0.99] + items: { + type: { + float: { + examples: [0.5, 0.75, 0.9, 0.95, 0.99] + } + } + } + } + } + } + type: { + description: "The component type. This is a required field for all components and tells Vector which component to use." + name: "type" + required: true + warnings: [] + sort: -2 + type: { + string: { + enum: { + prometheus_exporter: "The type of this component." 
+ }
+ syntax: "literal"
+ }
+ }
+ }
+ }
+ input: {
+ logs: false
+ metrics: {
+ counter: true
+ distribution: true
+ gauge: true
+ histogram: true
+ set: false
+ summary: true
+ }
+ }
+ env_vars: {}
+ type: "prometheus_exporter"
+ #ExampleConfig: {
+ title: string
+ configuration: {
+ address: null
+ buckets: null
+ flush_period_secs: null
+ default_namespace: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {} | {} | [{} | {}]
+ output: string
+ }
+ examples: [{
+ title: "Counter"
+ configuration: {
+ default_namespace: "service"
+ address: null
+ buckets: null
+ flush_period_secs: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ kind: "incremental"
+ name: "logins"
+ counter: {
+ value: 1.5
+ }
+ tags: {
+ host: "my-host.local"
+ }
+ }
+ }
+ output: """
+ # HELP service_logins logins
+ # TYPE service_logins counter
+ service_logins{host="my-host.local"} 1.5
+ """
+ }, {
+ title: "Gauge"
+ configuration: {
+ address: null
+ buckets: null
+ flush_period_secs: null
+ default_namespace: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ kind: "absolute"
+ name: "memory_rss"
+ namespace: "app"
+ gauge: {
+ value: 1.5
+ }
+ tags: {
+ host: "my-host.local"
+ }
+ }
+ }
+ output: """
+ # HELP app_memory_rss memory_rss
+ # TYPE app_memory_rss gauge
+ app_memory_rss{host="my-host.local"} 1.5
+ """
+ }, {
+ title: "Histogram"
+ configuration: {
+ address: null
+ buckets: null
+ flush_period_secs: null
+ default_namespace: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ kind: "absolute"
+ name: "response_time_s"
+ tags: {}
+ histogram: {
+ buckets: [{
+ upper_limit: 0.005
+ count: 0
+ }, {
+ upper_limit: 0.01
+ count: 1
+ }, {
+ upper_limit: 0.025
+ count: 0
+ }, {
+ upper_limit: 0.05
+ count: 1
+ }, {
+ upper_limit: 0.1
+ count: 0
+ }, {
+ upper_limit: 0.25
+ count: 0
+ }, {
+ upper_limit: 0.5
+ count: 0
+ }, {
+ upper_limit: 1.0
+ count: 0
+ }, {
+ upper_limit: 2.5
+ count: 0
+ }, {
+ upper_limit: 5.0
+ count: 0
+ }, {
+ upper_limit: 10.0
+ count: 0
+ }]
+ count: 2
+ sum: 0.789
+ }
+ }
+ }
+ output: """
+ # HELP response_time_s response_time_s
+ # TYPE response_time_s histogram
+ response_time_s_bucket{le="0.005"} 0
+ response_time_s_bucket{le="0.01"} 1
+ response_time_s_bucket{le="0.025"} 0
+ response_time_s_bucket{le="0.05"} 1
+ response_time_s_bucket{le="0.1"} 0
+ response_time_s_bucket{le="0.25"} 0
+ response_time_s_bucket{le="0.5"} 0
+ response_time_s_bucket{le="1.0"} 0
+ response_time_s_bucket{le="2.5"} 0
+ response_time_s_bucket{le="5.0"} 0
+ response_time_s_bucket{le="10.0"} 0
+ response_time_s_bucket{le="+Inf"} 0
+ response_time_s_sum 0.789
+ response_time_s_count 2
+ """
+ }, {
+ title: "Distribution to histogram"
+ notes: "A histogram will be computed from the values and then passed to Prometheus."
+ configuration: {
+ buckets: [0.0, 1.0, 3.0]
+ address: null
+ flush_period_secs: null
+ default_namespace: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ name: "request_retries"
+ kind: "incremental"
+ distribution: {
+ samples: [{
+ value: 0.0
+ rate: 4
+ }, {
+ value: 1.0
+ rate: 2
+ }, {
+ value: 4.0
+ rate: 1
+ }]
+ statistic: "histogram"
+ }
+ tags: {
+ host: "my-host.local"
+ }
+ }
+ }
+ output: """
+ # HELP request_retries request_retries
+ # TYPE request_retries histogram
+ request_retries_bucket{host="my-host.local",le="0"} 4
+ request_retries_bucket{host="my-host.local",le="1"} 6
+ request_retries_bucket{host="my-host.local",le="3"} 6
+ request_retries_bucket{host="my-host.local",le="+Inf"} 7
+ request_retries_sum{host="my-host.local"} 6
+ request_retries_count{host="my-host.local"} 7
+ """
+ }, {
+ title: "Distribution to summary"
+ notes: "A summary will be computed from the values and then passed to Prometheus."
+ configuration: {
+ quantiles: [0.5, 0.75, 0.95]
+ address: null
+ buckets: null
+ flush_period_secs: null
+ default_namespace: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ name: "request_retries"
+ kind: "incremental"
+ tags: {}
+ distribution: {
+ samples: [{
+ value: 0.0
+ rate: 3
+ }, {
+ value: 1.0
+ rate: 2
+ }, {
+ value: 4.0
+ rate: 1
+ }]
+ statistic: "summary"
+ }
+ }
+ }
+ output: """
+ # HELP request_retries request_retries
+ # TYPE request_retries summary
+ request_retries{quantile="0.5"} 0
+ request_retries{quantile="0.75"} 1
+ request_retries{quantile="0.95"} 4
+ request_retries_sum 6
+ request_retries_count 6
+ request_retries_min 0
+ request_retries_max 4
+ request_retries_avg 1
+ """
+ }, {
+ title: "Summary"
+ configuration: {
+ address: null
+ buckets: null
+ flush_period_secs: null
+ default_namespace: null
+ quantiles: null
+ type: null
+ inputs: null
+ }
+ input: {
+ metric: {
+ name: "requests"
+ kind: "absolute"
+ summary: {
+ quantiles: [{
+ upper_limit: 0.01
+ value: 1.5
+ }, {
+ upper_limit: 0.5
+ value: 2.0
+ }, {
+ upper_limit: 0.99
+ value: 3.0
+ }]
+ count: 6
+ sum: 12.0
+ }
+ tags: {
+ host: "my-host.local"
+ }
+ }
+ }
+ output: """
+ # HELP requests requests
+ # TYPE requests summary
+ requests{host="my-host.local",quantile="0.01"} 1.5
+ requests{host="my-host.local",quantile="0.5"} 2
+ requests{host="my-host.local",quantile="0.99"} 3
+ requests_sum{host="my-host.local"} 12
+ requests_count{host="my-host.local"} 6
+ """
+ }]
+ how_it_works: {
+ histogram_buckets: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "histogram_buckets"
+ title: "Histogram Buckets"
+ body: """
+ Choosing the appropriate buckets for Prometheus histograms is a complicated
+ point of discussion. The [Histograms and Summaries Prometheus guide](\\(urls.prometheus_histograms_guide)) provides a good overview of histograms,
+ buckets, summaries, and how you should think about configuring them. The buckets
+ you choose should align with your known range and distribution of values as
+ well as how you plan to report on them. The aforementioned guide provides
+ examples of how you should align them.
+ """
+ sub_sections: [{
+ title: "Default Buckets"
+ body: """
+ The `buckets` option defines the global default buckets for histograms.
+ These defaults are tailored to broadly measure the response time (in seconds)
+ of a network service. Most likely, however, you will be required to define
+ buckets customized to your use case.
+ """ + }] + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: """ + This component is stateful, meaning its behavior changes based on previous inputs (events). + State is not preserved across restarts, therefore state-dependent behavior will reset between + restarts and depend on the inputs (events) received since the most recent restart. + """ + } + memory_usage: { + #Subsection: { + title: string + body: string + } + name: "memory_usage" + title: "Memory Usage" + body: """ + Like other Prometheus instances, the `prometheus` sink aggregates + metrics in memory which keeps the memory footprint to a minimum if Prometheus + fails to scrape the Vector instance over an extended period of time. The + downside is that data will be lost if Vector is restarted. This is by design of + Prometheus' pull model approach, but is worth noting if restart Vector + frequently. + """ + } + } + telemetry: { + metrics: { + events_in_total: { + description: "The total number of events accepted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_in_total" + } + events_out_total: { + description: "The total number of events emitted by this component." + type: "counter" + default_namespace: "vector" + tags: { + instance: { + name: "instance" + description: "The Vector instance identified by host and port." + required: true + examples: ["vector:9598"] + } + component_kind: { + name: "component_kind" + description: "The Vector component kind." + required: true + enum: { + sink: "Vector sink components" + source: "Vector source components" + transform: "Vector transform components" + } + } + component_name: { + name: "component_name" + description: "The Vector component ID." + required: true + examples: ["file_source", "splunk_sink"] + } + job: { + name: "job" + description: "The name of the job producing Vector metrics." + required: true + default: "vector" + } + component_type: { + name: "component_type" + description: "The Vector component type." + required: true + examples: ["file", "http", "honeycomb", "splunk_hec"] + } + } + name: "events_out_total" + } + } + } + } + } +} +#ComponentConfig: {} +#Member: { + id: strings.ToLower(name) + name: !="" + avatar: "\(github).png" + github: "https://github.com/\(_github)" +} +services: { + nginx: { + name: "Nginx" + thing: "an Nginx server" + url: "https://www.nginx.com/" + versions: null + description: "[Nginx][urls.nginx] is an HTTP and reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy server." 
+ }
+ host: {
+ name: "a Host (Node)"
+ thing: "a Host (Node)"
+ url: "https://en.wikipedia.org/wiki/Host_(network)"
+ versions: null
+ }
+ aws_s3: {
+ name: "AWS S3"
+ thing: "an AWS S3 bucket"
+ url: "https://aws.amazon.com/s3/"
+ versions: null
+ description: "[Amazon Simple Storage Service (Amazon S3)](https://aws.amazon.com/s3/) is a scalable, high-speed, web-based cloud storage service designed for online backup and archiving of data and applications on Amazon Web Services. It is very commonly used to store log data."
+ connect_to: {
+ vector: {
+ logs: {
+ setup: [{
+ title: "Create an AWS SQS queue"
+ description: "Create an AWS SQS queue for Vector to consume bucket notifications from."
+ detour: {
+ url: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-create-queue.html"
+ }
+ }, {
+ title: "Publish S3 bucket notifications to the queue"
+ description: """
+ Configure S3 to publish bucket notifications to your previously created SQS queue.
+ Ensure that it only publishes the following events:
+
+ - PUT
+ - POST
+ - COPY
+ - Multipart upload completed
+
+ These represent object creation events and ensure Vector does not double-process
+ S3 objects.
+ """
+ detour: {
+ url: "https://docs.aws.amazon.com/AmazonS3/latest/dev/ways-to-add-notification-config-to-bucket.html"
+ }
+ }, {
+ title: "Configure Vector"
+ description: """
+ Using the SQS queue URL provided to you by AWS, configure the Vector `aws_s3`
+ source to use the SQS queue via the `sqs.queue_url` option.
+ """
+ vector: {
+ configure: {
+ sources: {
+ aws_s3: {
+ type: "aws_s3"
+ sqs: {
+ queue_url: ""
+ }
+ }
+ }
+ }
+ }
+ }]
+ }
+ }
+ }
+ }
+ files: {
+ name: "files"
+ thing: "files"
+ url: "https://en.wikipedia.org/wiki/File_system"
+ versions: null
+ }
+ datadog_metrics: {
+ name: "Datadog metrics"
+ thing: "a Datadog metrics database"
+ url: "https://docs.datadoghq.com/metrics/"
+ versions: null
+ description: "[Datadog](https://www.datadoghq.com) is a monitoring service for cloud-scale applications, providing monitoring of servers, databases, tools, and services, through a SaaS-based data analytics platform."
+ }
+ nats: {
+ name: "NATS"
+ thing: "a NATS server"
+ url: "https://nats.io/"
+ versions: null
+ description: "[NATS.io](https://nats.io/) is a simple, secure and high performance open source messaging system for cloud native applications, IoT messaging, and microservices architectures. NATS.io is a Cloud Native Computing Foundation project."
+ }
+ mongodb: {
+ name: "MongoDB"
+ thing: "a MongoDB instance"
+ url: "https://www.mongodb.com"
+ versions: null
+ description: "[MongoDB][urls.mongodb] is a general purpose, document-based, distributed database built for modern application developers and for the cloud era."
+ }
+ aws_cloudwatch_logs: {
+ name: "AWS Cloudwatch logs"
+ thing: "an AWS Cloudwatch logs stream"
+ url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html"
+ versions: null
+ description: "[Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and management service that provides data and actionable insights for AWS, hybrid, and on-premises applications, and infrastructure resources. With CloudWatch, you can collect and access all your performance and operational data in the form of logs and metrics from a single platform."
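+ // Editor's note: a hedged sketch (not from the upstream eval output) of the
+ // `aws_s3` source setup step above as vector.toml; the source ID is
+ // hypothetical, and `queue_url` is intentionally left blank just as it is in
+ // the setup step.
+ //
+ //     [sources.my_aws_s3]
+ //     type = "aws_s3"
+ //
+ //     [sources.my_aws_s3.sqs]
+ //     queue_url = ""  # fill in the SQS queue URL provided to you by AWS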
+ connect_to: {
+ aws_kinesis_firehose: {
+ logs: {
+ setup: [{
+ title: "Stream CloudWatch logs to Firehose"
+ description: """
+ Using your configured AWS Firehose delivery stream, we'll need to
+ stream AWS Cloudwatch Logs to it. We achieve this through AWS Cloudwatch Logs
+ subscriptions.
+ """
+ detour: {
+ url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html#FirehoseExample"
+ }
+ }]
+ }
+ }
+ aws_s3: {
+ logs: {
+ description: """
+ AWS Cloudwatch logs can export log data to S3 which can then be
+ imported by Vector via the `aws_s3` source. Please note that this is
+ a single export, not a stream of data. If you want Vector to
+ continuously ingest AWS Cloudwatch logs data, you will need to
+ follow the AWS Cloudwatch logs to AWS Kinesis tutorial.
+ """
+ setup: [{
+ title: "Export AWS Cloudwatch logs data to AWS S3"
+ description: """
+ Follow the AWS CloudWatch to S3 export guide to export
+ your Cloudwatch logs data to the S3 bucket of your choice.
+ """
+ detour: {
+ url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/S3Export.html"
+ }
+ }]
+ }
+ }
+ }
+ }
+ elasticsearch: {
+ name: "Elasticsearch"
+ thing: "an Elasticsearch database"
+ url: "https://www.elastic.co/products/elasticsearch"
+ versions: null
+ description: "[Elasticsearch](https://www.elastic.co/products/elasticsearch) is a search engine based on the Lucene library. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents. As a result, it is very commonly used to store and analyze log data. It ships with Kibana which is a simple interface for visualizing and exploring data in Elasticsearch."
+ }
+ prometheus: {
+ name: "Prometheus"
+ thing: "a Prometheus database"
+ url: "https://prometheus.io/"
+ versions: null
+ description: "[Prometheus](https://prometheus.io/) is a pull-based monitoring system that scrapes metrics from configured endpoints, stores them efficiently, and supports a powerful query language to compose dynamic information from a variety of otherwise unrelated data points."
+ }
+ syslog: {
+ name: "Syslog"
+ thing: "Syslog"
+ url: "https://en.wikipedia.org/wiki/Syslog"
+ versions: null
+ description: "[Syslog](https://en.wikipedia.org/wiki/Syslog) stands for System Logging Protocol and is a standard protocol used to send system log or event messages to a specific server, called a syslog server. It is used to collect various device logs from different machines and send them to a central location for monitoring and review."
+ }
+ humio: {
+ name: "Humio"
+ thing: "a Humio database"
+ url: "https://humio.com"
+ versions: null
+ description: "[Humio][urls.humio] is a time-series logging and aggregation platform for unrestricted, comprehensive event analysis, On-Premises or in the Cloud. With 1TB/day of raw log ingest/node, in-memory stream processing, and live, shareable dashboards and alerts, you can instantly explore, monitor, and visualize any system’s data in real time. Metrics are converted to log events via the metric_to_log transform."
+ }
+ kubernetes: {
+ name: "Kubernetes"
+ thing: "a Kubernetes cluster"
+ url: "https://kubernetes.io"
+ versions: ">= 1.14"
+ }
+ gcp_operations_logs: {
+ name: "GCP Operations (formerly Stackdriver) logs"
+ thing: "a GCP Operations (formerly Stackdriver) logs account"
+ url: "https://cloud.google.com/logging/docs/reference/v2/rest/"
+ versions: null
+ description: "[Stackdriver][urls.gcp_stackdriver] is Google Cloud's embedded observability suite designed to monitor, troubleshoot, and improve cloud infrastructure, software and application performance. Stackdriver enables you to efficiently build and run workloads, keeping applications available and performing well."
+ }
+ sematext: {
+ name: "Sematext"
+ thing: "a Sematext account"
+ url: "https://sematext.com"
+ versions: null
+ description: "[Sematext](https://sematext.com) is a hosted monitoring platform based on Elasticsearch, providing powerful monitoring and management solutions to monitor and observe your apps in real time."
+ }
+ stdout: {
+ name: "STDOUT"
+ thing: "a STDOUT stream"
+ url: "https://en.wikipedia.org/wiki/Standard_streams#Standard_output_(stdout)"
+ versions: null
+ }
+ influxdb: {
+ name: "InfluxDB"
+ thing: "an InfluxDB database"
+ url: "https://www.influxdata.com/products/influxdb-overview/"
+ versions: null
+ description: "[InfluxDB](https://www.influxdata.com/products/influxdb-overview/) is an open-source time series database developed by InfluxData. It is written in Go and optimized for fast, high-availability storage and retrieval of time series data in fields such as operations monitoring, application metrics, Internet of Things sensor data, and real-time analytics."
+ }
+ papertrail: {
+ name: "Papertrail"
+ thing: "a Papertrail account"
+ url: "https://www.papertrail.com/"
+ versions: null
+ description: "[Papertrail](https://www.papertrail.com/) is a web-based log aggregation application used by developers and IT teams to search and view logs in real time."
+ }
+ loki: {
+ name: "Loki"
+ thing: "a Loki database"
+ url: "https://grafana.com/oss/loki/"
+ versions: null
+ description: "[Loki][urls.loki] is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by [Prometheus][urls.prometheus]. It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream."
+ }
+ aws_sqs: {
+ name: "AWS Simple Queue Service"
+ thing: "an AWS Simple Queue Service queue"
+ url: "https://aws.amazon.com/sqs/"
+ versions: null
+ description: "[Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) is a fully managed message queuing service that enables you to decouple and scale microservices, distributed systems, and serverless applications."
+ }
+ http: {
+ name: "HTTP"
+ thing: "an HTTP client"
+ url: "https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Client_request"
+ versions: null
+ }
+ datadog_logs: {
+ name: "Datadog logs"
+ thing: "a Datadog logs index"
+ url: "https://docs.datadoghq.com/logs/"
+ versions: null
+ description: "[Datadog](https://www.datadoghq.com) is a monitoring service for cloud-scale applications, providing monitoring of servers, databases, tools, and services, through a SaaS-based data analytics platform."
+ }
+ kafka: {
+ name: "Kafka"
+ thing: "Kafka topics"
+ url: "https://kafka.apache.org/"
+ versions: ">= 0.8"
+ description: "[Apache Kafka](https://kafka.apache.org/) is an open-source project for a distributed publish-subscribe messaging system rethought as a distributed commit log. Kafka stores messages in topics that are partitioned and replicated across multiple brokers in a cluster. Producers send messages to topics from which consumers read. These features make it an excellent candidate for durably storing logs and metrics data."
+ }
+ aws_kinesis_data_streams: {
+ name: "AWS Kinesis Data Streams"
+ thing: "an AWS Kinesis Data Streams stream"
+ url: "https://aws.amazon.com/kinesis/data-streams/"
+ versions: null
+ description: "[Amazon Kinesis Data Streams](https://aws.amazon.com/kinesis/data-streams/) is a scalable and durable real-time data streaming service that can continuously capture gigabytes of data per second from hundreds of thousands of sources, making it an excellent candidate for streaming logs and metrics data."
+ }
+ aws_ecs: {
+ name: "Amazon ECS"
+ thing: "an Amazon ECS container"
+ url: "https://aws.amazon.com/ecs/"
+ versions: null
+ }
+ aws_kinesis_firehose: {
+ name: "AWS Kinesis Firehose"
+ thing: "an AWS Kinesis Firehose stream"
+ url: "https://aws.amazon.com/kinesis/data-firehose/"
+ versions: null
+ description: """
+ [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) is a fully
+ managed service for delivering real-time streaming data to destinations
+ such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift,
+ Amazon Elasticsearch Service (Amazon ES), and Splunk.
+ """
+ connect_to: {
+ vector: {
+ logs: {
+ setup: [{
+ title: "Configure Vector to accept AWS Kinesis Firehose data"
+ vector: {
+ configure: {
+ sources: {
+ aws_kinesis_firehose: {
+ type: "aws_kinesis_firehose"
+ address: "0.0.0.0:443"
+ access_key: "A94A8FE5CCB19BA61C4C08"
+ region: "us-east-1"
+ }
+ }
+ }
+ }
+ }, {
+ title: "Configure TLS termination"
+ description: """
+ AWS Kinesis Firehose will only forward to HTTPS (and not HTTP)
+ endpoints running on port 443. You will need to either put a load
+ balancer in front of the Vector instance to handle TLS termination
+ or configure the `tls` options of the Vector `aws_kinesis_firehose`
+ source to serve a valid certificate.
+ """
+ detour: {
+ url: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html"
+ }
+ }, {
+ title: "Create an AWS Kinesis Firehose HTTP Stream"
+ description: """
+ Using your previously configured TLS enabled HTTP endpoint,
+ let's create a Kinesis Firehose HTTP stream that delivers
+ data to it. Be sure to use your HTTP endpoint.
+ """
+ detour: {
+ url: "https://aws.amazon.com/blogs/big-data/stream-data-to-an-http-endpoint-with-amazon-kinesis-data-firehose/"
+ }
+ }]
+ }
+ }
+ }
+ }
+ socket_receiver: {
+ name: "socket receiver"
+ thing: "a socket receiver"
+ url: "https://en.wikipedia.org/wiki/Network_socket"
+ versions: null
+ }
+ gcp_pubsub: {
+ name: "GCP PubSub"
+ thing: "a GCP PubSub pipeline"
+ url: "https://cloud.google.com/pubsub/"
+ versions: null
+ description: "[GCP Pub/Sub][urls.gcp_pubsub] is a fully-managed real-time messaging service that allows you to send and receive messages between independent applications on the Google Cloud Platform."
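+ // Editor's note: a hedged sketch (not from the upstream eval output) of the
+ // `aws_kinesis_firehose` source setup step above as vector.toml; the source ID
+ // is hypothetical and the values are those shown in the setup step.
+ //
+ //     [sources.my_aws_kinesis_firehose]
+ //     type = "aws_kinesis_firehose"
+ //     address = "0.0.0.0:443"             # Firehose only delivers to HTTPS on 443
+ //     access_key = "A94A8FE5CCB19BA61C4C08"
+ //     region = "us-east-1"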
+ azure_monitor_logs: {
+ name: "Azure Monitor logs"
+ thing: "an Azure Monitor logs account"
+ url: "https://azure.microsoft.com/en-us/services/monitor/"
+ versions: null
+ description: "[Azure Monitor](https://azure.microsoft.com/en-us/services/monitor/) is a service in Azure that provides performance and availability monitoring for applications and services in Azure, other cloud environments, or on-premises. Azure Monitor collects data from multiple sources into a common data platform where it can be analyzed for trends and anomalies."
+ }
+ logdna: {
+ name: "LogDNA"
+ thing: "a LogDNA account"
+ url: "https://logdna.com/"
+ versions: null
+ description: "[LogDNA](https://logdna.com/) is a log management system that allows engineering and DevOps teams to aggregate all system, server, and application logs into one platform. Collect, monitor, store, tail, and search application logs with one command-line or web interface."
+ }
+ splunk: {
+ name: "Splunk"
+ thing: "a Splunk database"
+ url: "https://www.splunk.com"
+ versions: null
+ }
+ docker: {
+ name: "Docker"
+ thing: "the Docker platform"
+ url: "https://www.docker.com/"
+ versions: ">= 1.24"
+ setup: [{
+ title: "Install Docker"
+ description: "Install Docker by following the Docker setup tutorial."
+ detour: {
+ url: "https://docs.docker.com/get-docker/"
+ }
+ }, {
+ title: "Verify Docker logs"
+ description: """
+ Ensure that the Docker Engine is properly exposing logs:
+ 
+ ```bash
+ docker logs $(docker ps -q | head -n 1)
+ ```
+ 
+ If you receive an error, it's likely that you do not have the proper Docker
+ logging drivers installed. The Docker Engine requires the [`json-file`](https://docs.docker.com/config/containers/logging/json-file/) (default),
+ [`journald`](https://docs.docker.com/config/containers/logging/journald/), or [`local`](https://docs.docker.com/config/containers/logging/local/) Docker
+ logging drivers to be installed.
+ """
+ }]
+ }
+ aws_cloudwatch_metrics: {
+ name: "AWS Cloudwatch metrics"
+ thing: "an AWS Cloudwatch metrics namespace"
+ url: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/working_with_metrics.html"
+ versions: null
+ description: "[Amazon CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and management service that provides data and actionable insights for AWS, hybrid, and on-premises applications and infrastructure resources. With CloudWatch, you can collect and access all your performance and operational data in the form of logs and metrics from a single platform."
+ }
+ heroku: {
+ name: "Heroku"
+ thing: "a Heroku app"
+ url: "https://devcenter.heroku.com/articles/logplex"
+ versions: null
+ description: """
+ [Heroku](https://www.heroku.com) is a container-based platform for deploying and
+ managing applications. It's a platform as a service (PaaS) that is fully
+ managed, allowing developers on Heroku to focus on their applications
+ instead of their infrastructure.
+ """
+ setup: [{
+ title: "Set up a Heroku app"
+ description: "Set up a Heroku app by following the Heroku setup instructions."
+ detour: {
+ url: "https://devcenter.heroku.com/start"
+ }
+ }]
+ connect_to: {
+ vector: {
+ logs: {
+ setup: [{
+ title: "Configure Vector to accept Heroku logs"
+ vector: {
+ configure: {
+ sources: {
+ logplex: {
+ type: "logplex"
+ address: "0.0.0.0:80"
+ }
+ }
+ }
+ }
+ }, {
+ title: "Configure TLS termination"
+ description: """
+ It is _highly_ recommended to configure TLS termination for
+ your previously configured Vector logplex address.
+ 
+ You should either put a load balancer in front of the Vector
+ instance to handle TLS termination or configure the `tls` options
+ of the Vector `logplex` source to serve a valid certificate.
+ """
+ detour: {
+ url: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html"
+ }
+ }, {
+ title: "Set up a Heroku log drain"
+ description: """
+ Using your exposed Vector HTTP address, create a [Heroku log drain](https://devcenter.heroku.com/articles/log-drains#https-drains)
+ that points to your Vector instance's address:
+ 
+ ```bash
+ heroku drains:add https://<user>:<password>@<vector_address> -a <app_name>
+ ```
+ """
+ }]
+ }
+ }
+ }
+ }
+ prometheus_client: {
+ name: "Prometheus client"
+ thing: "a Prometheus client"
+ url: "https://prometheus.io/docs/instrumenting/clientlibs/"
+ versions: null
+ }
+ new_relic_logs: {
+ name: "New Relic logs"
+ thing: "a New Relic logs account"
+ url: "https://newrelic.com/"
+ versions: null
+ description: "[New Relic][urls.new_relic] is a San Francisco, California-based technology company which develops cloud-based software to help website and application owners track the performance of their services."
+ }
+ statsd_receiver: {
+ name: "StatsD receiver"
+ thing: "a StatsD receiver"
+ url: "https://github.com/statsd/statsd"
+ versions: null
+ description: "[StatsD](https://github.com/statsd/statsd) is a standard and, by extension, a set of tools that can be used to send, collect, and aggregate custom metrics from any application. Originally, StatsD referred to a daemon written by [Etsy](https://www.etsy.com) in Node."
+ }
+ stdin: {
+ name: "STDIN"
+ thing: "the STDIN stream"
+ url: "https://en.wikipedia.org/wiki/Standard_streams#Standard_input_(stdin)"
+ versions: null
+ }
+ gcp_cloud_storage: {
+ name: "GCP Cloud Storage"
+ thing: "a GCP Cloud Storage bucket"
+ url: "https://cloud.google.com/storage"
+ versions: null
+ description: "[Google Cloud Storage][urls.gcp_cloud_storage] is a RESTful online file storage web service for storing and accessing data on Google Cloud Platform infrastructure. The service combines the performance and scalability of Google's cloud with advanced security and sharing capabilities. This makes it a prime candidate for log data."
+ }
+ socket_client: {
+ name: "socket client"
+ thing: "a socket client"
+ url: "https://en.wikipedia.org/wiki/Network_socket"
+ versions: null
+ }
+ statsd: {
+ name: "StatsD"
+ thing: "StatsD"
+ url: "https://github.com/statsd/statsd"
+ versions: null
+ }
+ journald: {
+ name: "JournalD"
+ thing: "JournalD"
+ url: "https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html"
+ versions: null
+ description: "[Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) is a utility for accessing log data across a variety of system services. It was introduced with [Systemd](https://systemd.io/) to help system administrators collect, access, and route log data."
+ }
+ vector: {
+ name: "Vector"
+ thing: "a Vector instance"
+ url: "https://vector.dev/docs/"
+ versions: ">= 0.11.0"
+ connect_to: {
+ splunk: {
+ logs: {
+ setup: [{
+ title: "Create a Splunk HEC endpoint"
+ description: "Follow the Splunk HEC setup docs to create a Splunk HEC endpoint."
+ detour: {
+ url: "https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector"
+ }
+ }, {
+ title: "Configure Vector"
+ description: """
+ Splunk will provide you with a host and token. Copy those
+ values to the `host` and `token` options.
+ """
+ vector: {
+ configure: {
+ sinks: {
+ splunk_hec: {
+ type: "splunk_hec"
+ host: ""
+ token: ""
+ }
+ }
+ }
+ }
+ }]
+ }
+ }
+ }
+ }
+ pulsar: {
+ name: "Apache Pulsar"
+ thing: "an Apache Pulsar cluster"
+ url: "https://pulsar.apache.org/"
+ versions: null
+ description: "[Pulsar](https://pulsar.apache.org/) is a multi-tenant, high-performance solution for server-to-server messaging. Originally developed by Yahoo, it is now under the stewardship of the Apache Software Foundation. It is an excellent tool for streaming logs and metrics data."
+ }
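+ // The splunk_hec step above leaves `host` and `token` empty for the
+ // user to fill in. A CUE constraint could reject unfilled placeholders;
+ // a sketch, with a hypothetical #SplunkHecSink name:
+ #SplunkHecSink: {
+ type: "splunk_hec"
+ host: string & !=""
+ token: string & !=""
+ }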
+ clickhouse: {
+ name: "Clickhouse"
+ thing: "a Clickhouse database"
+ url: "https://clickhouse.yandex/"
+ versions: null
+ description: "[ClickHouse](https://clickhouse.yandex/) is an open-source column-oriented database management system that manages extremely large volumes of data, including non-aggregated data, in a stable and sustainable manner and allows generating custom data reports in real time. The system is linearly scalable and can be scaled up to store and process trillions of rows and petabytes of data. This makes it a best-in-class storage for logs and metrics data."
+ }
+ honeycomb: {
+ name: "Honeycomb"
+ thing: "a Honeycomb dataset"
+ url: "https://honeycomb.io"
+ versions: null
+ description: "[Honeycomb](https://honeycomb.io) provides full stack observability, designed for high cardinality data and collaborative problem solving, enabling engineers to deeply understand and debug production software together."
+ }
+ apache_http: {
+ name: "Apache HTTP server (HTTPD)"
+ thing: "an Apache HTTP server (HTTPD)"
+ url: "https://httpd.apache.org"
+ versions: null
+ setup: [{
+ title: "Install Apache HTTP"
+ description: "Install Apache HTTP by following their installation instructions."
+ detour: {
+ url: "https://httpd.apache.org/docs/current/install.html"
+ }
+ }]
+ connect_to: {
+ vector: {
+ metrics: {
+ setup: [{
+ title: "Enable the Apache Status Module"
+ description: """
+ Enable the [Apache Status module](http://httpd.apache.org/docs/current/mod/mod_status.html)
+ in your Apache config.
+ 
+ ```text file="/etc/apache2/httpd.conf"
+ # ...
+ 
+ <Location "/server-status">
+ SetHandler server-status
+ Require host example.com
+ </Location>
+ 
+ # ...
+ ```
+ """
+ }, {
+ title: "Optionally enable ExtendedStatus"
+ description: """
+ Optionally enable the [`ExtendedStatus` option](https://httpd.apache.org/docs/current/mod/core.html#extendedstatus)
+ for more detailed metrics.
+ 
+ ```text file="/etc/apache2/httpd.conf"
+ # ...
+ 
+ ExtendedStatus On
+ 
+ # ...
+ ```
+ """
+ notes: ["This defaults to `On` in Apache >= 2.3.6."]
+ }, {
+ title: "Apply the Apache config changes"
+ description: "Start or reload Apache to apply the config changes."
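+ // A hypothetical next step, by analogy with the other connect_to
+ // entries above, would configure Vector itself to scrape the status
+ // endpoint. The `apache_metrics` source type and `endpoints` option are
+ // assumptions based on Vector's documentation, not taken from this file:
+ //
+ // {
+ // title: "Configure Vector to scrape the status endpoint"
+ // vector: configure: sources: apache_status: {
+ // type: "apache_metrics"
+ // endpoints: ["http://localhost/server-status?auto"]
+ // }
+ // }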
+ }] + } + } + } + } +} +#Remap: { + #Characteristic: { + anchor: string + name: string + title: string + description: string + } + #Characteristics: {} + #Example: { + title: string + source: string + } + #Type: "any" | "array" | "boolean" | "float" | "integer" | "object" | "null" | "path" | "string" | "regex" | "timestamp" + concepts: _ + description: string + errors: _ + examples: [{ + title: string + source: string + }] + expressions: _ + features: _ + functions: _ + literals: _ + principles: _ + syntax: _ +} +urls: { + affine_type_system: "https://en.wikipedia.org/wiki/Substructural_type_system#Affine_type_systems" + adaptive_request_concurrency_post: "/blog/adaptive-request-concurrency/" + amazon_linux: "https://aws.amazon.com/amazon-linux-ami/" + ansi_escape_codes: "https://en.wikipedia.org/wiki/ANSI_escape_code" + apache: "https://httpd.apache.org" + apache_common: "https://httpd.apache.org/docs/1.3/logs.html#common" + apache_error: "https://httpd.apache.org/docs/1.3/logs.html#errorlog" + apache_extended_status: "https://httpd.apache.org/docs/current/mod/core.html#extendedstatus" + apache_install: "https://httpd.apache.org/docs/current/install.html" + apache_mod_status: "http://httpd.apache.org/docs/current/mod/mod_status.html" + apt: "https://en.wikipedia.org/wiki/APT_(software)" + arm: "https://en.wikipedia.org/wiki/ARM_architecture" + aws_access_keys: "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html" + aws_arm_g2_announcement: "https://aws.amazon.com/about-aws/whats-new/2019/12/announcing-new-amazon-ec2-m6g-c6g-and-r6g-instances-powered-by-next-generation-arm-based-aws-graviton2-processors/" + aws_athena: "https://aws.amazon.com/athena/" + aws_athena_console: "https://console.aws.amazon.com/athena/home" + aws_canonical_user_id: "https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId" + aws_cloudwatch: "https://aws.amazon.com/cloudwatch/" + aws_cloudwatch_logs: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html" + aws_cloudwatch_logs_api: "https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html" + aws_cloudwatch_logs_group_name: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html" + aws_cloudwatch_logs_service_limits: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/cloudwatch_limits_cwl.html" + aws_cloudwatch_logs_sink_source: "https://github.com/timberio/vector/blob/master/src/sinks/aws_cloudwatch_logs/" + aws_cloudwatch_logs_stream_name: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html" + aws_cloudwatch_logs_regions: "https://docs.aws.amazon.com/general/latest/gr/rande.html#cwl_region" + aws_cloudwatch_logs_s3_export: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/S3Export.html" + aws_cloudwatch_logs_subscriptions: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html" + aws_cloudwatch_logs_subscriptions_firehose: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/SubscriptionFilters.html#FirehoseExample" + aws_cloudwatch_metrics: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/working_with_metrics.html" + aws_cloudwatch_metrics_api: "https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html" + aws_cloudwatch_metrics_service_limits: "https://docs.aws.amazon.com/en_pv/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html" + aws_cloudwatch_metrics_regions: 
"https://docs.aws.amazon.com/general/latest/gr/rande.html#cw_region" + aws_credential_process: "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sourcing-external.html" + aws_credentials_file: "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html" + aws_docs: "https://docs.aws.amazon.com" + aws_elasticsearch: "https://aws.amazon.com/elasticsearch-service/" + aws_elasticsearch_regions: "https://docs.aws.amazon.com/general/latest/gr/rande.html#elasticsearch-service-regions" + aws_ec2_instance_metadata: "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html" + aws_ecs: "https://aws.amazon.com/ecs/" + aws_ecs_task_metadata: "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html" + aws_elb: "https://aws.amazon.com/elasticloadbalancing/" + aws_elb_access_format: "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html#access-log-entry-examples" + aws_elb_https: "https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html" + aws_iam: "https://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html" + aws_iam_role: "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html" + aws_imds_v1_security_problems: "https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service/" + aws_kinesis_firehose: "https://aws.amazon.com/kinesis/data-firehose/" + aws_kinesis_firehose_http_protocol: "https://docs.aws.amazon.com/firehose/latest/dev/create-destination.html#create-destination-http" + aws_firehose_http_request_spec: "https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html" + aws_kinesis_firehose_api: "https://docs.aws.amazon.com/firehose/latest/APIReference/API_PutRecordBatch.html" + aws_kinesis_firehose_service_limits: "https://docs.aws.amazon.com/firehose/latest/dev/limits.html" + aws_kinesis_firehose_http_setup: "https://aws.amazon.com/blogs/big-data/stream-data-to-an-http-endpoint-with-amazon-kinesis-data-firehose/" + aws_kinesis_partition_key: "https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecordsRequestEntry.html#Streams-Type-PutRecordsRequestEntry-PartitionKey" + aws_kinesis_streams: "https://aws.amazon.com/kinesis/data-streams/" + aws_kinesis_streams_api: "https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecords.html" + aws_kinesis_streams_service_limits: "https://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html" + aws_kinesis_split_shards: "https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-resharding-split.html" + aws_regions: "https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html" + aws_s3: "https://aws.amazon.com/s3/" + aws_s3_acl: "https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html" + aws_s3_bucket_notifications_to_sqs: "https://docs.aws.amazon.com/AmazonS3/latest/dev/ways-to-add-notification-config-to-bucket.html" + aws_s3_canned_acl: "https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl" + aws_s3_cross_account_tutorial: "https://docs.aws.amazon.com/AmazonS3/latest/dev/example-walkthroughs-managing-access-example3.html" + aws_s3_endpoints: "https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_endpoint" + aws_s3_grantee: "https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee" + aws_s3_metadata: 
"https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-metadata" + aws_s3_regions: "https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region" + aws_s3_server_access_logs: "https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html" + aws_s3_service_limits: "https://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html" + aws_s3_sse: "https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html" + aws_s3_storage_classes: "https://aws.amazon.com/s3/storage-classes/" + aws_s3_tags: "https://docs.aws.amazon.com/AmazonS3/latest/user-guide/add-object-tags.html" + aws_sqs: "https://aws.amazon.com/sqs/" + aws_sqs_api: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/Welcome.html" + aws_sqs_create: "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-configure-create-queue.html" + aws_vpc_flow_logs: "https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html" + azure_monitor: "https://azure.microsoft.com/en-us/services/monitor/" + azure_monitor_logs_endpoints: "https://docs.microsoft.com/en-us/rest/api/monitor/" + base64: "https://en.wikipedia.org/wiki/Base64" + base64_padding: "https://en.wikipedia.org/wiki/Base64#Output_padding" + base64_standard: "https://tools.ietf.org/html/rfc4648#section-4" + base64_url_safe: "https://en.wikipedia.org/wiki/Base64#URL_applications" + basic_auth: "https://en.wikipedia.org/wiki/Basic_access_authentication" + big_query_streaming: "https://cloud.google.com/bigquery/streaming-data-into-bigquery" + b_tree_map: "https://doc.rust-lang.org/std/collections/struct.BTreeMap.html" + cargo_audit: "https://github.com/RustSec/cargo-audit" + centos: "https://www.centos.org/" + chrono_time_formats: "https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers" + cgroups_limit_resources: "https://the.binbashtheory.com/control-resources-cgroups/" + clickhouse: "https://clickhouse.yandex/" + clickhouse_http: "https://clickhouse.yandex/docs/en/interfaces/http/" + cloudsmith: "https://cloudsmith.io/~timber/repos/vector/packages/" + cloudsmith_apt: "https://cloudsmith.io/~timber/repos/vector/setup/#formats-deb" + cloudsmith_yum: "https://cloudsmith.io/~timber/repos/vector/setup/#formats-rpm" + console: "https://en.wikipedia.org/wiki/System_console" + conventional_commits: "https://www.conventionalcommits.org" + contributing: "https://github.com/timberio/vector/blob/master/CONTRIBUTING.md#setup" + crc: "https://en.wikipedia.org/wiki/Cyclic_redundancy_check" + ctime: "https://www.cplusplus.com/reference/ctime" + cue: "https://cuelang.org/" + dag: "https://en.wikipedia.org/wiki/Directed_acyclic_graph" + datadog: "https://www.datadoghq.com" + datadog_distribution: "https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition" + datadog_docs: "https://docs.datadoghq.com" + datadog_logs: "https://docs.datadoghq.com/logs/" + datadog_logs_endpoints: "https://docs.datadoghq.com/logs/log_collection/?tab=http#datadog-logs-endpoints" + datadog_metrics: "https://docs.datadoghq.com/metrics/" + datadog_metrics_endpoints: "https://docs.datadoghq.com/api/v1/metrics/" + date: "https://man7.org/linux/man-pages/man1/date.1.html" + debian: "https://www.debian.org/" + default_configuration: "https://github.com/timberio/vector/blob/master/config/vector.toml" + docker: "https://www.docker.com/" + docker_alpine: "https://hub.docker.com/_/alpine" + docker_cli: "https://docs.docker.com/engine/reference/commandline/cli/" + docker_debian: 
"https://hub.docker.com/_/debian" + docker_daemon: "https://docs.docker.com/engine/docker-overview/#the-docker-daemon" + docker_daemon_socket_option: "https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option" + docker_docs: "https://docs.docker.com" + docker_dns: "https://docs.docker.com/config/containers/container-networking/#dns-services" + docker_engine: "https://docs.docker.com/engine/" + docker_engine_api: "https://docs.docker.com/engine/api/" + docker_env_vars: "https://docs.docker.com/engine/reference/commandline/cli/#environment-variables" + docker_networking: "https://docs.docker.com/network/network-tutorial-host/" + docker_hub: "https://hub.docker.com" + docker_hub_vector: "https://hub.docker.com/r/timberio/vector" + docker_logging_driver_journald: "https://docs.docker.com/config/containers/logging/journald/" + docker_logging_driver_json_file: "https://docs.docker.com/config/containers/logging/json-file/" + docker_logging_driver_local: "https://docs.docker.com/config/containers/logging/local/" + docker_logging_driver_splunk: "https://docs.docker.com/config/containers/logging/splunk/" + docker_logging_driver_syslog: "https://docs.docker.com/config/containers/logging/syslog/" + docker_logging_drivers: "https://docs.docker.com/config/containers/logging/configure/" + docker_object_labels: "https://docs.docker.com/config/labels-custom-metadata/" + docker_setup: "https://docs.docker.com/get-docker/" + dockerfile: "https://github.com/timberio/vector/blob/master/Dockerfile" + dogstatsd: "https://docs.datadoghq.com/developers/dogstatsd/?tab=hostagent" + dpkg: "https://wiki.debian.org/dpkg" + dry_code: "https://en.wikipedia.org/wiki/Don%27t_repeat_yourself" + cidr: "https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing" + elasticsearch: "https://www.elastic.co/products/elasticsearch" + elasticsearch_bulk: "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html" + elasticsearch_data_streams: "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" + elasticsearch_id_field: "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-id-field.html" + elasticsearch_id_performance: "https://www.elastic.co/guide/en/elasticsearch/reference/master/tune-for-indexing-speed.html#_use_auto_generated_ids" + elasticsearch_ignore_malformed: "https://www.elastic.co/guide/en/elasticsearch/reference/current/ignore-malformed.html" + encoding_charset_labels: "https://encoding.spec.whatwg.org/#concept-encoding-get" + encoding_standard: "https://encoding.spec.whatwg.org/" + endler_dev: "https://endler.dev/" + etsy: "https://www.etsy.com" + event_proto: "https://github.com/timberio/vector/blob/master/proto/event.proto" + exit_codes: "https://docs.rs/exitcode/latest/exitcode/#constants" + externally_tagged_representation: "https://serde.rs/enum-representations.html#externally-tagged" + fail_safe: "https://en.wikipedia.org/wiki/Fail-safe" + ffi: "https://en.wikipedia.org/wiki/Foreign_function_interface" + file: "https://en.wikipedia.org/wiki/Computer_file" + file_system: "https://en.wikipedia.org/wiki/File_system" + freebsd: "https://www.freebsd.org/" + gcp_authentication: "https://cloud.google.com/docs/authentication/" + gcp_authentication_api_key: "https://cloud.google.com/docs/authentication/api-keys" + gcp_authentication_server_to_server: "https://cloud.google.com/docs/authentication/production" + gcp_authentication_service_account: 
"https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually" + gcp_cloud_storage: "https://cloud.google.com/storage" + gcp_folders: "https://cloud.google.com/resource-manager/docs/creating-managing-folders" + gcp_pubsub: "https://cloud.google.com/pubsub/" + gcp_pubsub_rest: "https://cloud.google.com/pubsub/docs/reference/rest/" + gcp_projects: "https://cloud.google.com/resource-manager/docs/creating-managing-projects" + gcp_resources: "https://cloud.google.com/monitoring/api/resources" + gcp_stackdriver: "https://cloud.google.com/products/operations" + gcp_stackdriver_logging: "https://cloud.google.com/logging/docs/reference/v2/rest/" + gcp_stackdriver_logging_rest: "https://cloud.google.com/logging/" + gcp_stackdriver_severity: "https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#logseverity" + gcp_xml_interface: "https://cloud.google.com/storage/docs/xml-api/overview" + gcs_predefined_acl: "https://cloud.google.com/storage/docs/access-control/lists#predefined-acl" + gcs_storage_classes: "https://cloud.google.com/storage/docs/storage-classes" + gcs_custom_metadata: "https://cloud.google.com/storage/docs/metadata#custom-metadata" + git: "https://git-scm.com/" + github: "https://github.com" + github_protected_branches: "https://help.github.com/en/github/administering-a-repository/about-protected-branches" + github_sign_commits: "https://help.github.com/en/github/authenticating-to-github/signing-commits" + globbing: "https://en.wikipedia.org/wiki/Glob_(programming)" + glog: "https://github.com/google/glog" + graphql: "https://graphql.org" + graphql_playground: "https://github.com/graphql/graphql-playground" + grok: "https://grokdebug.herokuapp.com/" + grok_debugger: "https://grokdebug.herokuapp.com/" + grok_patterns: "https://github.com/daschl/grok/tree/master/patterns" + gzip: "https://www.gzip.org/" + haproxy: "https://www.haproxy.org/" + helm: "https://helm.sh/" + heroku: "https://www.heroku.com" + heroku_http_log_drain: "https://devcenter.heroku.com/articles/log-drains#https-drains" + heroku_start: "https://devcenter.heroku.com/start" + homebrew: "https://brew.sh/" + homebrew_services: "https://github.com/Homebrew/homebrew-services" + honeycomb: "https://honeycomb.io" + honeycomb_batch: "https://docs.honeycomb.io/api/events/#batched-events" + honeycomb_signup: "https://ui.honeycomb.io/signup" + host: "https://en.wikipedia.org/wiki/Host_(network)" + http: "https://www.w3.org/Protocols/" + http_client: "https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Client_request" + http_server: "https://en.wikipedia.org/wiki/Web_server" + humio: "https://humio.com" + humio_hec: "https://docs.humio.com/integrations/data-shippers/hec/" + iam_instance_profile: "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html" + iana_time_zone_format: "https://en.wikipedia.org/wiki/Tz_database#Names_of_time_zones" + iana_time_zones: "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones" + ieee_754: "https://en.wikipedia.org/wiki/IEEE_754" + ietf_rfc_6750: "https://tools.ietf.org/html/rfc6750" + initd: "https://bash.cyberciti.biz/guide//etc/init.d" + influxdb: "https://www.influxdata.com/products/influxdb-overview/" + influxdb_http_api_v1: "https://docs.influxdata.com/influxdb/latest/tools/api/#write-http-endpoint" + influxdb_http_api_v2: "https://v2.docs.influxdata.com/v2.0/api/#tag/Write" + influxdb_authentication_token: "https://v2.docs.influxdata.com/v2.0/security/tokens/" + 
influxdb_line_protocol: "https://v2.docs.influxdata.com/v2.0/reference/syntax/line-protocol/" + inode: "https://en.wikipedia.org/wiki/Inode" + iso_8601: "https://en.wikipedia.org/wiki/ISO_8601" + iso3166_2: "https://en.wikipedia.org/wiki/ISO_3166-2" + issue_1694: "https://github.com/timberio/vector/issues/1694" + jemalloc: "https://github.com/jemalloc/jemalloc" + journald: "https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html" + json: "https://en.wikipedia.org/wiki/JSON" + json_types: "https://en.wikipedia.org/wiki/JSON#Data_types_and_syntax" + jsonnet: "https://jsonnet.org/" + kafka: "https://kafka.apache.org/" + kafka_partitioning_docs: "https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Partitioningandbootstrapping" + kafka_protocol: "https://kafka.apache.org/protocol" + kafka_sasl: "https://docs.confluent.io/current/kafka/authentication_sasl/index.html" + kubectl: "https://kubernetes.io/docs/reference/kubectl/overview/" + kubernetes: "https://kubernetes.io" + kubernetes_accessing_api_from_pod: "https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod" + kubernetes_api: "https://kubernetes.io/docs/concepts/overview/kubernetes-api/" + kubernetes_api_server: "https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/" + kubernetes_authorization: "https://kubernetes.io/docs/reference/access-authn-authz/authorization/" + kubernetes_daemonset: "https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/" + kubernetes_example_daemonset: "https://github.com/timberio/vector/blob/master/config/kubernetes/vector-daemonset.yaml" + kubernetes_limit_resources: "https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/" + kubernetes_logging_architecture: "https://kubernetes.io/docs/concepts/cluster-administration/logging/" + kubernetes_rbac: "https://kubernetes.io/docs/reference/access-authn-authz/rbac/" + kubernetes_request_verbs: "https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb" + kubernetes_watch_api: "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#watch-30" + leveldb: "https://github.com/google/leveldb" + leveldb_sys_2: "https://crates.io/crates/leveldb-sys" + leveldb_sys_3: "https://github.com/timberio/leveldb-sys/tree/v3.0.0" + librdkafka: "https://github.com/edenhill/librdkafka" + librdkafka_config: "https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md" + logdna: "https://logdna.com/" + logfmt: "https://brandur.org/logfmt" + loki: "https://grafana.com/oss/loki/" + loki_multi_tenancy: "https://github.com/grafana/loki/blob/master/docs/operations/multi-tenancy.md" + log_event_source: "https://github.com/timberio/vector/blob/master/src/event/" + logplex: "https://devcenter.heroku.com/articles/logplex" + logplex_protocol: "https://github.com/heroku/logplex/blob/master/doc/README.http_drains.md" + lua: "https://www.lua.org" + lua_boolean: "https://www.lua.org/pil/2.2.html" + lua_csv_repo: "https://github.com/geoffleyland/lua-csv" + lua_csv_view: "https://github.com/geoffleyland/lua-csv/blob/09557e4608b02d136b9ae39a8fa0f36328fa1cec/lua/csv.lua" + lua_csv_raw: "https://raw.githubusercontent.com/geoffleyland/lua-csv/d20cd42d61dc52e7f6bcb13b596ac7a7d4282fbf/lua/csv.lua" + lua_integer: "https://docs.rs/rlua/latest/rlua/type.Integer.html" + lua_manual: "https://www.lua.org/manual/5.3/manual.html" + lua_modules: 
"https://www.lua.org/manual/5.3/manual.html#6.3" + lua_modules_tutorial: "http://lua-users.org/wiki/ModulesTutorial" + lua_number: "https://docs.rs/rlua/latest/rlua/type.Number.html" + lua_os_date: "https://www.lua.org/manual/5.3/manual.html#pdf-os.date" + lua_os_time: "https://www.lua.org/manual/5.3/manual.html#pdf-os.time" + lua_pairs: "https://www.lua.org/manual/5.3/manual.html#pdf-pairs" + lua_pil: "https://www.lua.org/pil/" + lua_require: "https://www.lua.org/manual/5.3/manual.html#pdf-require" + lua_table: "https://www.lua.org/pil/2.5.html" + lua_sequence: "https://www.lua.org/pil/11.1.html" + lua_string: "https://www.lua.org/pil/2.4.html" + lua_tonumber: "https://www.lua.org/manual/5.3/manual.html#pdf-tonumber" + lz4: "https://lz4.github.io/lz4/" + macos: "https://en.wikipedia.org/wiki/MacOS" + mailing_list: "https://vector.dev/community/" + maxmind: "https://www.maxmind.com/en/home" + maxmind_db_file_format: "https://maxmind.github.io/MaxMind-DB/" + maxmind_geoip2: "https://dev.maxmind.com/geoip/geoip2/downloadable" + maxmind_geoip2_city: "https://www.maxmind.com/en/geoip2-city" + maxmind_geoip2_isp: "https://www.maxmind.com/en/geoip2-isp-database" + maxmind_geolite2_asn: "https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access" + maxmind_geolite2_city: "https://dev.maxmind.com/geoip/geoip2/geolite2/#Download_Access" + memory_safety: "https://en.wikipedia.org/wiki/Memory_safety" + memory_safety_bugs: "https://thenewstack.io/microsoft-rust-is-the-industrys-best-chance-at-safe-systems-programming/" + metric_event_source: "https://github.com/timberio/vector/blob/master/src/event/metric.rs" + mongodb: "https://www.mongodb.com" + mongodb_command_server_status: "https://docs.mongodb.com/manual/reference/command/serverStatus/" + mongodb_connection_string_uri_format: "https://docs.mongodb.com/manual/reference/connection-string/" + musl_builder_docker_image: "https://github.com/timberio/vector/blob/master/scripts/ci-docker-images/builder-x86_64-unknown-linux-musl/Dockerfile" + nats: "https://nats.io/" + new_bug_report: "https://github.com/timberio/vector/issues/new?labels=type%3A+bug" + new_feature_request: "https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature" + new_relic: "https://newrelic.com/" + new_relic_log_api: "https://docs.newrelic.com/docs/logs/new-relic-logs/log-api/introduction-log-api" + new_security_report: "https://github.com/timberio/vector/issues/new?labels=domain%3A+security" + new_sink: "https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature" + new_source: "https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature" + new_target: "https://github.com/timberio/vector/issues/new?labels=type%3A+task&labels=domain%3A+operations" + new_transform: "https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature" + nginx: "https://www.nginx.com/" + nginx_stub_status_module: "http://nginx.org/en/docs/http/ngx_http_stub_status_module.html" + nix: "https://nixos.org/nix/" + nixos: "https://nixos.org/" + nixpkgs_9682: "https://github.com/NixOS/nixpkgs/issues/9682" + openssl: "https://www.openssl.org/" + order_of_ops: "https://en.wikipedia.org/wiki/Order_of_operations" + papertrail: "https://www.papertrail.com/" + papertrail_syslog: "https://help.papertrailapp.com/kb/how-it-works/http-api/#submitting-log-messages" + perl_windows: "https://www.perl.org/get.html#win32" + postgresql: "https://www.postgresql.org/" + postgresql_csvlog: 
"https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-CSVLOG" + postgresql_matching: "https://www.postgresql.org/docs/current/functions-matching.html#FUNCTIONS-POSIX-REGEXP" + prometheus: "https://prometheus.io/" + prometheus_client: "https://prometheus.io/docs/instrumenting/clientlibs/" + prometheus_counter: "https://prometheus.io/docs/concepts/metric_types/#counter" + prometheus_gauge: "https://prometheus.io/docs/concepts/metric_types/#gauge" + prometheus_high_cardinality: "https://prometheus.io/docs/practices/naming/#labels" + prometheus_histogram: "https://prometheus.io/docs/concepts/metric_types/#histogram" + prometheus_histograms_guide: "https://prometheus.io/docs/practices/histograms/" + prometheus_summary: "https://prometheus.io/docs/concepts/metric_types/#summary" + prometheus_text_based_exposition_format: "https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format" + prometheus_metric_naming: "https://prometheus.io/docs/practices/naming/#metric-names" + prometheus_remote_integrations: "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage" + prometheus_remote_write: "https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write" + protobuf: "https://developers.google.com/protocol-buffers" + pulsar: "https://pulsar.apache.org/" + pulsar_protocol: "https://pulsar.apache.org/docs/en/develop-binary-protocol/" + raspbian: "https://www.raspbian.org/" + rdkafka: "https://github.com/edenhill/librdkafka" + regex: "https://en.wikipedia.org/wiki/Regular_expression" + regex_grouping_and_flags: "https://docs.rs/regex/latest/regex/#grouping-and-flags" + regex_tester: "https://rustexp.lpil.uk/" + rfc_822: "https://tools.ietf.org/html/rfc822#section-5" + rfc_2064: "https://github.com/timberio/vector/blob/master/rfcs/2020-03-17-2064-event-driven-observability.md" + rfc_2822: "https://tools.ietf.org/html/rfc2822#section-3.3" + rfc_3339: "https://tools.ietf.org/html/rfc3339" + rfc_4180: "https://tools.ietf.org/html/rfc4180" + rhel: "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux" + rlua: "https://github.com/kyren/rlua" + rpm: "https://rpm.org/" + rust: "https://www.rust-lang.org/" + rust_date_time: "https://docs.rs/chrono/latest/chrono/struct.DateTime.html" + rust_grok_library: "https://github.com/daschl/grok" + rust_k8s_openapi_crate: "https://docs.rs/k8s-openapi/" + rust_memory_safety: "https://hacks.mozilla.org/2019/01/fearless-security-memory-safety" + rust_regex_syntax: "https://docs.rs/regex/latest/regex/#syntax" + rust_sec: "https://rustsec.org/" + rust_subprocess: "https://docs.rs/subprocess" + rust_target_triples: "https://forge.rust-lang.org/platform-support.html" + rust_tier_1_platform: "https://forge.rust-lang.org/release/platform-support.html#tier-1" + rust_tokio: "https://github.com/tokio-rs/tokio" + rustup: "https://rustup.rs" + sematext: "https://sematext.com" + sematext_create_logs_app: "https://apps.sematext.com/ui/integrations" + sematext_es: "https://sematext.com/docs/logs/index-events-via-elasticsearch-api/" + sematext_logsense: "http://www.sematext.com/logsene/" + sematext_monitoring: "https://sematext.com/docs/monitoring/" + sematext_registration: "https://apps.sematext.com/ui/registration" + semver: "https://semver.org/" + sha1: "https://en.wikipedia.org/wiki/SHA-1" + sha2: "https://en.wikipedia.org/wiki/SHA-2" + sha3: "https://en.wikipedia.org/wiki/SHA-3" + snake_case: 
"https://en.wikipedia.org/wiki/Snake_case" + snappy: "https://google.github.io/snappy/" + socket: "https://en.wikipedia.org/wiki/Network_socket" + splunk: "https://www.splunk.com" + splunk_hec: "https://dev.splunk.com/enterprise/docs/dataapps/httpeventcollector/" + splunk_hec_event_endpoint: "https://docs.splunk.com/Documentation/Splunk/8.0.0/RESTREF/RESTinput#services.2Fcollector.2Fevent" + splunk_hec_indexed_fields: "https://docs.splunk.com/Documentation/Splunk/8.0.0/Data/IFXandHEC" + splunk_hec_protocol: "https://docs.splunk.com/Documentation/Splunk/8.0.0/Data/HECRESTendpoints" + splunk_hec_raw_endpoint: "https://docs.splunk.com/Documentation/Splunk/8.0.0/RESTREF/RESTinput#services.2Fcollector.2Fraw" + splunk_hec_setup: "https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector" + standard_streams: "https://en.wikipedia.org/wiki/Standard_streams" + statsd: "https://github.com/statsd/statsd" + statsd_multi: "https://github.com/statsd/statsd/blob/master/docs/metric_types.md#multi-metric-packets" + statsd_set: "https://github.com/statsd/statsd/blob/master/docs/metric_types.md#sets" + statsd_udp_protocol: "https://github.com/b/statsd_spec" + stderr: "https://en.wikipedia.org/wiki/Standard_streams#Standard_error_(stderr)" + stdin: "https://en.wikipedia.org/wiki/Standard_streams#Standard_input_(stdin)" + stdout: "https://en.wikipedia.org/wiki/Standard_streams#Standard_output_(stdout)" + stripe_blog_canonical_log_lines: "https://stripe.com/blog/canonical-log-lines" + strptime_specifiers: "https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers" + syslog: "https://en.wikipedia.org/wiki/Syslog" + syslog_3164: "https://tools.ietf.org/html/rfc3164" + syslog_5424: "https://tools.ietf.org/html/rfc5424" + syslog_6587: "https://tools.ietf.org/html/rfc6587" + syslog_facility: "https://en.wikipedia.org/wiki/Syslog#Facility" + syslog_levels: "https://en.wikipedia.org/wiki/Syslog#Severity_level" + systemd: "https://systemd.io/" + systemd_limit_resources: "https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html" + systemd_resolved: "https://wiki.archlinux.org/index.php/Systemd-resolved" + tcp: "https://en.wikipedia.org/wiki/Transmission_Control_Protocol" + team: "/community#team" + timber: "https://timber.io" + toml: "https://github.com/toml-lang/toml" + toml_array: "https://github.com/toml-lang/toml#array" + toml_table: "https://github.com/toml-lang/toml#table" + toml_types: "https://github.com/toml-lang/toml#table-of-contents" + twelve_factor_app: "https://12factor.net/" + type_safety: "https://en.wikipedia.org/wiki/Type_safety" + tz_time_zones: "https://en.wikipedia.org/wiki/List_of_tz_database_time_zones" + ubuntu: "https://ubuntu.com/" + udp: "https://en.wikipedia.org/wiki/User_Datagram_Protocol" + uds: "https://en.wikipedia.org/wiki/Unix_domain_socket" + unicode_replacement_character: "https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character" + unicode_whitespace: "https://en.wikipedia.org/wiki/Unicode_character_property#Whitespace" + unix_timestamp: "https://en.wikipedia.org/wiki/Unix_time" + utf8: "https://en.wikipedia.org/wiki/UTF-8" + uuidv4: "https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)" + url: "https://en.wikipedia.org/wiki/URL" + vector_aggregator_role: "https://vector.dev/docs/setup/deployment/roles/#aggregator" + vector_blog: "https://vector.dev/blog/" + vector_bug_issues: "https://github.com/timberio/vector/issues?q=is%3Aopen+is%3Aissue+label%3A%22type%3A+bug%22" + 
vector_changelog: "https://github.com/timberio/vector/blob/master/CHANGELOG.md" + vector_chat: "https://chat.vector.dev" + vector_code_of_conduct: "https://github.com/timberio/vector/blob/master/CODE_OF_CONDUCT.md" + vector_community: "https://vector.dev/community/" + vector_components: "https://vector.dev/components/" + vector_configuration: "https://vector.dev/docs/configuration/" + vector_data_model: "https://vector.dev/docs/about/under-the-hood/architecture/data-model/" + vector_debian_source_files: "https://github.com/timberio/vector/tree/master/distribution/debian" + vector_dedupe_transform: "https://vector.dev/docs/reference/transforms/dedupe/" + vector_docker_logs_source: "https://vector.dev/docs/reference/sources/docker_logs/" + vector_docker_source_files: "https://github.com/timberio/vector/tree/master/distribution/docker" + vector_docs: "https://vector.dev/docs/" + vector_download: "https://vector.dev/releases/latest/download/" + vector_download_nightly: "https://vector.dev/releases/nightly/download/" + vector_enriching_transforms: "https://vector.dev/components/?functions%5B%5D=enrich" + vector_file_source: "https://vector.dev/docs/reference/sources/file/" + vector_generate_arguments_issue: "https://github.com/timberio/vector/issues/1966" + vector_guides: "https://vector.dev/guides/" + vector_glibc_benchmarks: "https://github.com/timberio/vector/issues/2313" + vector_graphql_playground: "https://playground.vector.dev:8686/playground" + vector_highlights: "https://vector.dev/highlights/" + vector_host_metrics_source: "https://vector.dev/docs/reference/sources/host_metrics/" + vector_http_auth_token: "https://vector.dev/docs/reference/sinks/http/#token" + vector_homebrew: "https://vector.dev/docs/setup/installation/package-managers/homebrew/" + vector_homebrew_source_files: "https://github.com/timberio/homebrew-brew/blob/master/Formula/vector.rb" + vector_http_source: "https://vector.dev/docs/reference/sources/http/" + vector_initd_service: "https://github.com/timberio/vector/blob/master/distribution/init.d/vector" + vector_installer: "https://sh.vector.dev" + vector_issues: "https://github.com/timberio/vector/issues" + vector_journald_source: "https://vector.dev/docs/reference/sources/journald/" + vector_kubernetes_logs_source: "https://vector.dev/docs/reference/sources/kubernetes_logs/" + vector_level_up: "https://vector.dev/guides/level-up/" + vector_log: "https://vector.dev/docs/about/data-model/log/" + vector_log_data_types: "https://vector.dev/docs/about/data-model/log/#types" + vector_lua_rfc: "https://github.com/timberio/vector/blob/master/rfcs/2020-03-06-1999-api-extensions-for-lua-transform.md" + vector_metric: "https://vector.dev/docs/about/data-model/metric/" + vector_msi_source_files: "https://github.com/timberio/vector/tree/master/distribution/msi" + vector_nightly_builds: "https://packages.timber.io/vector/nightly/latest/" + vector_nix_package: "https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/vector/default.nix" + vector_parsing_transforms: "https://vector.dev/components/?functions%5B%5D=parse" + vector_performance: "https://vector.dev/#performance" + vector_privacy_policy: "https://github.com/timberio/vector/blob/master/PRIVACY.md" + vector_programmable_transforms: "https://vector.dev/components/?functions%5B%5D=program" + vector_pull_requests: "https://github.com/timberio/vector/pulls" + vector_receiving_sources: "https://vector.dev/components/?functions%5B%5D=receive" + vector_regex_parser: 
"https://vector.dev/docs/reference/transforms/regex_parser/" + vector_releases: "https://vector.dev/releases/latest/" + vector_reduce_transform: "https://vector.dev/docs/reference/transforms/reduce/" + vector_remap_transform: "https://vector.dev/docs/reference/transforms/remap/" + vector_repo: "https://github.com/timberio/vector" + vector_roadmap: "https://roadmap.vector.dev" + vector_rpm_source_files: "https://github.com/timberio/vector/tree/master/distribution/rpm" + vector_security_policy: "https://github.com/timberio/vector/security/policy" + vector_semantic_yml: "https://github.com/timberio/vector/blob/master/.github/semantic.yml" + vector_sink: "https://vector.dev/docs/reference/sinks/vector" + vector_sinks: "https://vector.dev/docs/reference/sinks/" + vector_sink_http_batch: "https://vector.dev/docs/reference/sinks/http/#batch" + vector_sink_kafka_sasl: "https://vector.dev/docs/reference/sources/kafka/#sasl" + vector_socket_source: "https://vector.dev/docs/reference/sources/socket/" + vector_source: "https://vector.dev/docs/reference/sources/vector/" + vector_sources: "https://vector.dev/docs/reference/sources/" + vector_stars: "https://github.com/timberio/vector/stargazers" + vector_stdin_source: "https://vector.dev/docs/reference/sources/stdin/" + vector_systemd_file: "https://github.com/timberio/vector/blob/master/distribution/systemd/vector.service" + vector_test_harness: "https://github.com/timberio/vector-test-harness/" + vector_transform_aws_cloudwatch_logs_subscription_parser: "https://vector.dev/docs/reference/transforms/aws_cloudwatch_logs_subscription_parser" + vector_transforms: "https://vector.dev/docs/reference/transforms/" + vector_twitter: "https://twitter.com/vectordotdev" + vector_version_branches: "https://github.com/timberio/vector/branches/all?query=v" + vector_website: "https://vector.dev" + vrl_announcement: "https://vector.dev/blog/vector-remap-language" + vrl_error_handling: "https://vector.dev/docs/reference/vrl/errors/#handling" + vrl_error_handling_assigning: "https://vector.dev/docs/reference/vrl/errors/#assigning" + vrl_error_handling_coalescing: "https://vector.dev/docs/reference/vrl/errors/#coalesing" + vrl_error_handling_raising: "https://vector.dev/docs/reference/vrl/errors/#raising" + vrl_errors_reference: "https://vector.dev/docs/reference/vrl/errors/" + vrl_expressions: "https://vector.dev/docs/reference/vrl/expressions/" + vrl_fail_safety: "https://vector.dev/docs/reference/vrl/#fail-safety" + vrl_features: "https://vector.dev/docs/reference/vrl/#features" + vrl_functions: "https://vector.dev/docs/reference/vrl/functions/" + vrl_literals: "https://vector.dev/docs/reference/vrl/expressions/#literal-expressions" + vrl_parsing_functions: "https://vector.dev/docs/reference/vrl/functions/#parse-functions" + vrl_path_expressions: "https://vector.dev/docs/reference/vrl/expressions/#path" + vrl_performance: "https://vector.dev/docs/reference/vrl/#performance" + vrl_reference: "https://vector.dev/docs/reference/vrl/" + vrl_runtime_errors: "https://vector.dev/docs/reference/vrl/errors/#runtime-errors" + vrl_safety: "https://vector.dev/docs/reference/vrl/#safety" + vrl_type_safety: "https://vector.dev/docs/reference/vrl/#type-safety" + vote_feature: "https://github.com/timberio/vector/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3A%22Type%3A+New+Feature%22" + wasm: "https://webassembly.org/" + wasm_languages: "https://github.com/appcypher/awesome-wasm-langs" + wikipedia: "https://en.wikipedia.org" + windows: 
"https://www.microsoft.com/en-us/windows" + windows_installer: "https://en.wikipedia.org/wiki/Windows_Installer" + windows_service: "https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.management/new-service" + yaml: "https://yaml.org/" + yum: "https://en.wikipedia.org/wiki/Yum_(software)" + zlib: "https://www.zlib.net" + zstd: "https://zstd.net" +} +configuration: { + configuration: { + data_dir: { + common: false + description: """ + The directory used for persisting Vector state, such + as on-disk buffers, file checkpoints, and more. + Please make sure the Vector project has write + permissions to this directory. + """ + name: "data_dir" + required: false + warnings: [] + type: { + string: { + default: "/var/lib/vector/" + examples: ["/var/lib/vector", "/var/local/lib/vector/", "/home/user/vector/"] + syntax: "literal" + } + } + } + healthchecks: { + common: false + category: "Healthchecks" + description: "Configures health checks for all sinks." + name: "healthchecks" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + enabled: { + common: true + description: """ + Disables all health checks if false, otherwise sink specific + option overrides it. + """ + name: "enabled" + required: false + warnings: [] + type: { + bool: { + default: true + } + } + } + require_healthy: { + common: false + description: """ + Exit on startup if any sinks' health check fails. Overridden by + `--require-healthy` command line flag. + """ + name: "require_healthy" + required: false + warnings: [] + type: { + bool: { + default: false + } + } + } + } + } + } + } + } + how_it_works: { + environment_variables: { + #Subsection: { + title: string + body: string + } + name: "environment_variables" + title: "Environment variables" + body: """ + Vector will interpolate environment variables within your configuration file + with the following syntax: + + ```toml title="vector.toml" + [transforms.add_host] + type = "add_fields" + + [transforms.add_host.fields] + host = "${HOSTNAME}" + environment = "${ENV:-development}" # default value when not present + ``` + """ + sub_sections: [{ + title: "Default values" + body: """ + Default values can be supplied via the `:-` syntax: + + ```toml + option = "${ENV_VAR:-default}" + ``` + """ + }, { + title: "Escaping" + body: """ + You can escape environment variable by preceding them with a `$` character. For + example `$${HOSTNAME}` will be treated _literally_ in the above environment + variable example. + """ + }] + } + formats: { + #Subsection: { + title: string + body: string + } + name: "formats" + title: "Formats" + body: """ + Vector supports [TOML](https://github.com/toml-lang/toml), [YAML](https://yaml.org/), and [JSON](https://en.wikipedia.org/wiki/JSON) to + ensure Vector fits into your workflow. A side benefit of supporting JSON is the + enablement of data templating languages like [Jsonnet](https://jsonnet.org/) and + [Cue](https://cuelang.org/). + """ + } + location: { + #Subsection: { + title: string + body: string + } + name: "location" + title: "Location" + body: """ + The location of your Vector configuration file depends on your installation method. For most Linux + based systems, the file can be found at `/etc/vector/vector.toml`. 
+ """ + } + multiple: { + #Subsection: { + title: string + body: string + } + name: "multiple" + title: "Multiple files" + body: """ + You can pass multiple configuration files when starting Vector: + + ```bash + vector --config vector1.toml --config vector2.toml + ``` + + Or use a [globbing syntax](https://en.wikipedia.org/wiki/Glob_(programming)): + + ```bash + vector --config /etc/vector/*.toml + ``` + """ + } + wildcards: { + #Subsection: { + title: string + body: string + } + name: "wildcards" + title: "Wildcards in identifiers" + body: """ + Vector supports wildcards (`*`) in component identifiers when building your topology, but only supports + them as the last character. For example: + + ```toml + [sources.app1_logs] + type = "file" + includes = ["/var/log/app1.log"] + + [sources.app2_logs] + type = "file" + includes = ["/var/log/app.log"] + + [sources.system_logs] + type = "file" + includes = ["/var/log/system.log"] + + [sinks.app_logs] + type = "datadog_logs" + inputs = ["app*"] + + [sinks.archive] + type = "aws_s3" + inputs = ["app*", "system_logs"] + ``` + """ + } + } +} +data_model: { + schema: { + log: { + common: true + category: "Log" + description: """ + A Vector log event is a structured representation of a + point-in-time event. It contains an arbitrary set of + fields that describe the event. + + A key tenet of Vector is to remain schema neutral. This + ensures that Vector can work with any schema, supporting + legacy and future schemas as your needs evolve. Vector + does not require any specific fields, and each component + will document the fields it provides. + """ + name: "log" + required: false + warnings: [] + type: { + object: { + examples: [{ + host: "my.host.com" + message: "Hello world" + timestamp: "2020-11-01T21:15:47+00:00" + custom: "field" + }] + options: { + "*": { + common: true + description: "An arbitrary set of key/value pairs that can be infinitely nested." + name: "*" + required: false + warnings: [] + type: { + "*": {} + } + } + } + } + } + } + metric: { + common: true + category: "Metric" + description: """ + A Vector metric event represents a numerical operation + performed on a time series. Unlike other tools, metrics + in Vector are first class citizens, they are not represented + as structured logs. This makes them interoperable with + various metrics services without the need for any + transformation. + + Vector's metric data model favors accuracy and correctness over + ideological purity. Therefore, Vector's metric types are a + conglomeration of various metric types found in the wild, such as + Prometheus and Statsd. This\tensures metric data is _correctly_ + interoperable between systems. + """ + name: "metric" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + counter: { + common: true + category: "Counter" + description: """ + A single value that can only be incremented + or reset to zero value, it cannot be + decremented. + """ + name: "counter" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + value: { + description: "The value to increment the counter by. Can only be positive." + name: "value" + required: true + warnings: [] + type: { + float: { + examples: [1.0, 10.0, 500.0] + } + } + } + } + } + } + } + distribution: { + common: true + category: "Distribution" + description: """ + A distribution represents a distribution of + sampled values. It is used with services + that support global histograms and summaries. 
+ """ + name: "distribution" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + samples: { + description: "The set of sampled values." + name: "samples" + required: true + warnings: [] + type: { + array: { + items: { + type: { + object: { + examples: [] + options: { + rate: { + description: "The rate at which this value was sampled." + name: "rate" + required: true + warnings: [] + type: { + uint: { + examples: [12, 43, 25] + unit: null + } + } + } + value: { + description: "The value being sampled." + name: "value" + required: true + warnings: [] + type: { + uint: { + unit: null + } + } + } + } + } + } + } + } + } + } + statistic: { + description: "The statistic to be calculated from the values." + name: "statistic" + required: true + warnings: [] + type: { + string: { + enum: { + histogram: "Counts values in buckets." + summary: "Calculates quantiles of values." + } + examples: ["histogram", "summary"] + syntax: "literal" + } + } + } + } + } + } + } + gauge: { + common: true + category: "Gauge" + description: """ + A gauge represents a point-in-time value + that can increase and decrease. Vector's + internal gauge type represents changes to + that value. Gauges should be used to track + fluctuations in values, like current memory + or CPU usage. + """ + name: "gauge" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + value: { + description: "A specific point-in-time value for the gauge." + name: "value" + required: true + warnings: [] + type: { + float: { + examples: [1.0, 10.0, 500.0] + } + } + } + } + } + } + } + histogram: { + common: true + category: "Histogram" + description: """ + Also called a "timer". A histogram samples + observations (usually things like request + durations or response sizes) and counts them + in configurable buckets. It also provides a + sum of all observed values. + """ + name: "histogram" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + buckets: { + description: "The set of buckets containing the histogram values." + name: "buckets" + required: true + warnings: [] + type: { + array: { + items: { + type: { + object: { + examples: [] + options: { + count: { + description: "The number of values contained within this bucket." + name: "count" + required: true + warnings: [] + type: { + uint: { + examples: [1, 10, 25, 100] + unit: null + } + } + } + upper_limit: { + description: "The upper limit of the samples within the bucket." + name: "upper_limit" + required: true + warnings: [] + type: { + uint: { + unit: null + } + } + } + } + } + } + } + } + } + } + count: { + description: "The total number of values contained within the histogram." + name: "count" + required: true + warnings: [] + type: { + uint: { + examples: [1, 10, 25, 100] + unit: null + } + } + } + sum: { + description: "The sum of all values contained within the histogram." + name: "sum" + required: true + warnings: [] + type: { + float: { + examples: [1.0, 10.0, 25.0, 100.0] + } + } + } + } + } + } + } + kind: { + description: "The metric value kind." + name: "kind" + required: true + warnings: [] + type: { + string: { + enum: { + absolute: "The metric value is absolute and replaces values as it is received downstream." + incremental: "The metric value increments a cumulated value as it is received downstream." + } + examples: ["absolute", "incremental"] + syntax: "literal" + } + } + } + name: { + description: "The metric name." 
+ name: "name" + required: true + warnings: [] + type: { + string: { + examples: ["memory_available_bytes"] + syntax: "literal" + } + } + } + namespace: { + description: "The metric namespace. Depending on the service, this will prepend the name or use native namespacing facilities." + name: "namespace" + required: true + warnings: [] + type: { + string: { + examples: ["host", "apache", "nginx"] + syntax: "literal" + } + } + } + set: { + common: true + category: "Set" + description: "A set represents an array of unique values." + name: "set" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + values: { + description: "The list of unique values." + name: "values" + required: true + warnings: [] + type: { + array: { + items: { + type: { + string: { + examples: ["value1", "value2"] + syntax: "literal" + } + } + } + } + } + } + } + } + } + } + summary: { + common: true + category: "Summary" + description: """ + Similar to a histogram, a summary samples + observations (usually things like request + durations and response sizes). While it also + provides a total count of observations and a + sum of all observed values, it calculates + configurable quantiles over a sliding time + window. + """ + name: "summary" + required: false + warnings: [] + type: { + object: { + examples: [] + options: { + count: { + description: "The total number of values contained within the summary." + name: "count" + required: true + warnings: [] + type: { + uint: { + examples: [54] + unit: null + } + } + } + quantiles: { + description: "The set of observations." + name: "quantiles" + required: true + warnings: [] + type: { + array: { + items: { + type: { + object: { + examples: [] + options: { + value: { + description: "The value of this quantile range." + name: "value" + required: true + warnings: [] + type: { + uint: { + unit: null + } + } + } + upper_limit: { + description: "The upper limit for this quantile range, where 0 ≤ upper_limit ≤ 1." + name: "upper_limit" + required: true + warnings: [] + type: { + uint: { + unit: null + } + } + } + } + } + } + } + } + } + } + sum: { + description: "The sum of all values contained within the histogram." + name: "sum" + required: true + warnings: [] + type: { + float: { + examples: [1.0, 10.0, 25.0, 100.0] + } + } + } + } + } + } + } + tags: { + category: "Tags" + description: "The metric tags. Key/value pairs, nesting is not allowed." + name: "tags" + required: true + warnings: [] + type: { + object: { + examples: [{ + host: "my.host.com" + instance_id: "abcd1234" + }] + options: { + "*": { + common: true + description: "Key/value pairs, nesting is not allowed." + name: "*" + required: false + warnings: [] + type: { + "*": {} + } + } + } + } + } + } + timestamp: { + description: "The metric timestamp; when the metric was created." 
+ name: "timestamp" + required: true + warnings: [] + type: { + timestamp: { + examples: ["2020-10-10T17:07:36.452332Z"] + } + } + } + } + } + } + } + } +} +installation: { + #PackageManager: { + description: string + name: string + title: string + } + #PackageManagers: {} + #Commands: { + configure: "none" + install: string | null + logs: string | null + reload: string | null + restart: string | null + start: string | null + stop: string | null + top: "vector top" + uninstall: string + upgrade: string | null + } + #Interface: { + archs: ["ARM64" | "ARMv7" | "x86_64"] + description: string + paths: { + bin: string | null + bin_in_path: bool | null + config: string | null + } + role_implementations: {} + name: string + title: string + } + #Interfaces: {} + #RoleImplementation: { + commands: { + configure: "none" + install: string | null + logs: string | null + reload: string | null + restart: string | null + start: string | null + stop: string | null + top: "vector top" + uninstall: string + upgrade: string | null + } + description: string + name: string + title: string + tutorials: { + installation: [] + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: [] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + #RoleImplementations: {} + #Tutorials: { + installation: [] + } + #Variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: [] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + #Role: { + name: string + title: string + sub_roles: {} + } + #Roles: {} + #OperatingSystem: { + description: string + family: "Linux" | "macOS" | "Windows" + interfaces: [{ + archs: ["ARM64" | "ARMv7" | "x86_64"] + description: string + paths: { + bin: string | null + bin_in_path: bool | null + config: string | null + } + role_implementations: {} + name: string + title: string + }] + minimum_supported_version: string | null + name: string + shell: string + title: string + } + #OperatingSystems: {} + #Platform: { + description: string + how_it_works: {} + minimum_supported_version: string | null + name: string + title: string + } + #Platforms: {} + #Download: { + available_on_latest: bool + available_on_nightly: bool + arch: "ARM64" | "ARMv7" | "x86_64" + file_name: string + file_type: string + library: string | null + name: string + os: "Linux" | "macOS" | "Windows" + title: "\(os) (\(arch))" + type: "archive" | "package" + } + #Downloads: {} + downloads: { + "aarch64-unknown-linux-musl-tar-gz": { + available_on_latest: true + available_on_nightly: true + arch: "ARM64" + file_name: "vector-{version}-aarch64-unknown-linux-musl.tar.gz" + file_type: "tar.gz" + library: "musl" + name: "aarch64-unknown-linux-musl-tar-gz" + os: "Linux" + title: "Linux (ARM64)" + type: "archive" + } + "x86_64-unknown-linux-musl-tar-gz": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-x86_64-unknown-linux-musl.tar.gz" + file_type: "tar.gz" + library: "musl" + name: "x86_64-unknown-linux-musl-tar-gz" + os: "Linux" + title: "Linux (x86_64)" + type: "archive" + } + "armv7-unknown-linux-musleabihf-tar-gz": { + available_on_latest: true + available_on_nightly: true + arch: "ARMv7" + file_name: "vector-{version}-armv7-unknown-linux-musleabihf.tar.gz" + file_type: "tar.gz" + library: "musl" + name: 
"armv7-unknown-linux-musleabihf-tar-gz" + os: "Linux" + title: "Linux (ARMv7)" + type: "archive" + } + "x86_64-rpm": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-1.x86_64.rpm" + file_type: "rpm" + library: "gnu" + name: "x86_64-rpm" + os: "Linux" + package_manager: "rpm" + title: "Linux (x86_64)" + type: "package" + } + "x86_64-apple-darwin-tar-gz": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-x86_64-apple-darwin.tar.gz" + file_type: "tar.gz" + library: null + name: "x86_64-apple-darwin-tar-gz" + os: "macOS" + title: "macOS (x86_64)" + type: "archive" + } + "armv7-unknown-linux-gnueabihf-tar-gz": { + available_on_latest: true + available_on_nightly: true + arch: "ARMv7" + file_name: "vector-{version}-armv7-unknown-linux-gnueabihf.tar.gz" + file_type: "tar.gz" + library: "gnu" + name: "armv7-unknown-linux-gnueabihf-tar-gz" + os: "Linux" + title: "Linux (ARMv7)" + type: "archive" + } + "armhf-deb": { + available_on_latest: true + available_on_nightly: true + arch: "ARMv7" + file_name: "vector-{version}-armhf.deb" + file_type: "deb" + library: "gnu" + name: "armhf-deb" + os: "Linux" + package_manager: "dpkg" + title: "Linux (ARMv7)" + type: "package" + } + "x64-msi": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-x64.msi" + file_type: "msi" + library: null + name: "x64-msi" + os: "Windows" + package_manager: "msi" + title: "Windows (x86_64)" + type: "package" + } + "amd64-deb": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-amd64.deb" + file_type: "deb" + library: "gnu" + name: "amd64-deb" + os: "Linux" + package_manager: "dpkg" + title: "Linux (x86_64)" + type: "package" + } + "arm64-deb": { + available_on_latest: true + available_on_nightly: true + arch: "ARM64" + file_name: "vector-{version}-arm64.deb" + file_type: "deb" + library: "gnu" + name: "arm64-deb" + os: "Linux" + package_manager: "dpkg" + title: "Linux (ARM64)" + type: "package" + } + "x86_64-pc-windows-msvc-zip": { + available_on_latest: true + available_on_nightly: true + arch: "x86_64" + file_name: "vector-{version}-x86_64-pc-windows-msvc.zip" + file_type: "zip" + library: null + name: "x86_64-pc-windows-msvc-zip" + os: "Windows" + title: "Windows (x86_64)" + type: "archive" + } + "armv7-rpm": { + available_on_latest: true + available_on_nightly: true + arch: "ARMv7" + file_name: "vector-{version}-1.armv7.rpm" + file_type: "rpm" + library: "gnu" + name: "armv7-rpm" + os: "Linux" + package_manager: "rpm" + title: "Linux (ARMv7)" + type: "package" + } + "aarch64-rpm": { + available_on_latest: true + available_on_nightly: true + arch: "ARM64" + file_name: "vector-{version}-1.aarch64.rpm" + file_type: "rpm" + library: "gnu" + name: "aarch64-rpm" + os: "Linux" + package_manager: "rpm" + title: "Linux (ARM64)" + type: "package" + } + } + operating_systems: { + nixos: { + title: "NixOS" + description: """ + [NixOS](https://nixos.org/) is a Linux distribution built on top of the + Nix package manager. It uses declarative configuration and + allows reliable system upgrades. + """ + interfaces: [{ + title: "Nix" + description: """ + [Nix](https://nixos.org/nix/) is a cross-platform package manager + implemented on a functional deployment model where software is + installed into unique directories generated through + cryptographic hashes, it is also the name of the programming + language. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "nix" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --install --attr vector" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config /etc/vector/vector.{config_format}" + stop: null + top: "vector top" + uninstall: "nix-env --uninstall vector" + upgrade: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --install --attr vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config /etc/vector/vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --install --attr vector" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config /etc/vector/vector.{config_format}" + stop: null + top: "vector top" + uninstall: "nix-env --uninstall vector" + upgrade: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --install --attr vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config /etc/vector/vector.{config_format}" + }] + } + } + } + name: "nix" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates that installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current direction. + """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
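Each interface's agent role ships the same default topology: `journald` logs plus host and internal metrics, fanned into a JSON `console` sink. Extracted as standalone CUE data (values verbatim from this section):

```cue
// Default agent topology: three sources feeding one console sink.
config: {
	sources: {
		logs: type:             "journald"
		host_metrics: type:     "host_metrics"
		internal_metrics: type: "internal_metrics"
	}
	sinks: out: {
		type: "console"
		inputs: ["logs", "host_metrics", "internal_metrics"]
		encoding: codec: "json"
	}
}
```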
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for log via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
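The Docker `start` command above is a template: `{version}`, `{variant}`, `{config_format}`, and `{flags}` get substituted per deployment. A hedged sketch of rendering such a command with CUE string interpolation; the `#Start` name and field layout are ours, `{config_format}` is pinned to `toml` for concreteness, and the version is assumed for illustration:

```cue
// Assembling the templated docker run command from its parts.
#Start: {
	version: string
	variant: "debian" | "alpine" | "distroless"
	flags:   string | *"" // extra per-source flags, empty by default
	command: """
		docker run \\
		  -d \\
		  -v ~/vector.toml:/etc/vector/vector.toml:ro \\
		  -p 8383:8383 \\\(flags)
		  timberio/vector:\(version)-\(variant)
		"""
}

start: #Start & {version: "0.12.0", variant: "alpine"}
```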
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }] + family: "Linux" + minimum_supported_version: "15.09" + name: "nixos" + shell: "bash" + } + raspbian: { + title: "Raspbian" + description: """ + [Raspbian](https://www.raspbian.org/) is the operating system used on + Raspberry Pis. It is a Debian-based operating system designed for + compact single-board computers. + """ + interfaces: [{ + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates that installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current direction. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. 
Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for log via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. 
By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
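Just below, the `raspbian` entry closes with the OS-level metadata that `#OperatingSystem` requires: `family`, `minimum_supported_version`, `name`, and `shell`. A trimmed sketch of that shape unified with the concrete values:

```cue
// Trimmed #OperatingSystem with raspbian's concrete metadata.
#OperatingSystem: {
	family:                    "Linux" | "macOS" | "Windows"
	minimum_supported_version: string | null
	name:                      string
	shell:                     string
	title:                     string
}

raspbian: #OperatingSystem & {
	family:                    "Linux"
	minimum_supported_version: null
	name:                      "raspbian"
	shell:                     "bash"
	title:                     "Raspbian"
}
```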
+ """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }] + family: "Linux" + minimum_supported_version: null + name: "raspbian" + shell: "bash" + } + centos: { + title: "CentOS" + description: """ + [CentOS](https://www.centos.org/) is a Linux distribution that is + functionally compatible with its upstream source, Red Hat Enterprise + Linux. + """ + interfaces: [{ + title: "YUM" + description: """ + The [Yellowdog Updater](https://en.wikipedia.org/wiki/Yum_(software)), Modified (YUM) is a free and + open-source command-line package-manager for Linux operating system + using the RPM Package Manager. + + Our Yum repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-rpm). + """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "yum" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "yum" + }, { + title: "RPM" + description: """ + [RPM Package Manager](https://rpm.org/) is a free and open-source package + management system for installing and managing software on Fedra, CentOS, + OpenSUSE, OpenMandriva, Red Hat Enterprise Linux, and other + related Linux-based systems. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "rpm" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "rpm" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates that installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current direction. + """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
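Every sample config in this section enables Vector's API on the same loopback address; that is the endpoint the recurring `vector top` command talks to. As CUE data, verbatim:

```cue
config: api: {
	enabled: true
	address: "127.0.0.1:8686"
}
```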
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for log via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
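The recurring `variant: ["debian", "alpine", "distroless"]` list enumerates the published image flavors. Modeled as a CUE disjunction with a default, choosing one becomes a one-liner; the `*` default pick here is our assumption, not something the docs state:

```cue
#Variant: *"debian" | "alpine" | "distroless"

v1: #Variant            // resolves to "debian" via the default
v2: #Variant & "alpine" // explicit choice
```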
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }, { + title: "Helm 3" + description: """ + [Helm](https://helm.sh/) is a package manager for Kubernetes that + facilitates the deployment and management of applications and services + on Kubernetes clusters. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: null + } + package_manager_name: "helm" + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + add_repo: "helm repo add timberio https://packages.timber.io/helm/latest" + helm_values_show: "helm show values timberio/vector-agent" + configure: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + install: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + logs: "kubectl logs --namespace vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart --namespace vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "helm uninstall --namespace vector vector" + upgrade: "helm repo update && helm upgrade --namespace vector vector timberio/vector-agent --reuse-values" + } + name: "agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: "helm repo add timberio https://packages.timber.io/helm/latest" + }, { + title: "Check available Helm chart configuration options" + command: "helm show values timberio/vector-agent" + }, { + title: "Configure Vector" + command: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + }, { + title: "Install Vector" + command: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + }] + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + } + name: "helm3" + }, { + title: "kubectl" + description: """ + The [Kubernetes command-line tool](https://kubernetes.io/docs/reference/kubectl/overview/), kubectl, allows + users to run commands against Kubernetes clusters facilitating + application deployment, scaling, monitoring, and introspection. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: "vector.toml" + } + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/).
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + install: "kubectl apply -k ." + logs: "kubectl logs -n vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart -n vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "kubectl delete -k ." + upgrade: null + verify_config: "kubectl kustomize" + prepare_namespace: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + prepare_kustomization: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + configure: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + } + name: "agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + tutorials: { + installation: [{ + title: "Define Vector's namespace" + command: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + }, { + title: "Prepare kustomization" + command: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + }, { + title: "Configure Vector" + command: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + }, { + title: "Verify the config" + command: "kubectl kustomize" + }, { + title: "Install Vector" + command: "kubectl apply -k ."
+ }] + } + } + } + name: "kubectl" + }] + family: "Linux" + minimum_supported_version: "6" + name: "centos" + shell: "bash" + } + rhel: { + title: "RHEL" + description: """ + [Red Hat Enterprise Linux](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux) is a Linux distribution + developed by Red Hat for the commercial market. + """ + interfaces: [{ + title: "YUM" + description: """ + The [Yellowdog Updater](https://en.wikipedia.org/wiki/Yum_(software)), Modified (YUM) is a free and + open-source command-line package manager for Linux operating systems + using the RPM Package Manager. + + Our Yum repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-rpm). + """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "yum" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "yum" + }, { + title: "RPM" + description: """ + [RPM Package Manager](https://rpm.org/) is a free and open-source package + management system for installing and managing software on Fedra, CentOS, + OpenSUSE, OpenMandriva, Red Hat Enterprise Linux, and other + related Linux-based systems. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "rpm" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "rpm" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates the installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current directory. + """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for logs via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }, { + title: "Helm 3" + description: """ + [Helm](https://helm.sh/) is a package manager for Kubernetes that + facilitates the deployment and management of applications and services + on Kubernetes clusters. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: null + } + package_manager_name: "helm" + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + add_repo: "helm repo add timberio https://packages.timber.io/helm/latest" + helm_values_show: "helm show values timberio/vector-agent" + configure: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + install: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + logs: "kubectl logs --namespace vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart --namespace vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "helm uninstall --namespace vector vector" + upgrade: "helm repo update && helm upgrade --namespace vector vector timberio/vector-agent --reuse-values" + } + name: "agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: "helm repo add timberio https://packages.timber.io/helm/latest" + }, { + title: "Check available Helm chart configuration options" + command: "helm show values timberio/vector-agent" + }, { + title: "Configure Vector" + command: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + }, { + title: "Install Vector" + command: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + }] + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + } + name: "helm3" + }, { + title: "kubectl" + description: """ + The [Kubernetes command-line tool](https://kubernetes.io/docs/reference/kubectl/overview/), kubectl, allows + users to run commands against Kubernetes clusters facilitating + application deployment, scaling, monitoring, and introspection. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: "vector.toml" + } + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/).
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + install: "kubectl apply -k ." + logs: "kubectl logs -n vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart -n vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "kubectl delete -k ." + upgrade: null + verify_config: "kubectl kustomize" + prepare_namespace: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + prepare_kustomization: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + configure: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + } + name: "agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + tutorials: { + installation: [{ + title: "Define Vector's namespace" + command: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + }, { + title: "Prepare kustomization" + command: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + }, { + title: "Configure Vector" + command: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + }, { + title: "Verify the config" + command: "kubectl kustomize" + }, { + title: "Install Vector" + command: "kubectl apply -k ."
+ }] + } + } + } + name: "kubectl" + }] + family: "Linux" + minimum_supported_version: "4" + name: "rhel" + shell: "bash" + } + debian: { + title: "Debian" + description: """ + [Debian](https://www.debian.org/), also known as Debian GNU/Linux, is a Linux + distribution composed of free and open-source software, + developed by the community-supported Debian Project. + """ + interfaces: [{ + title: "Apt" + description: """ + [Advanced Package Tool](https://en.wikipedia.org/wiki/APT_(software)), or APT, is a free package manager + that handles the installation and removal of software on Debian, + Ubuntu, and other Linux distributions. + + Our APT repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-deb). + """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "apt" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + add_repo: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + configure: "none" + install: "sudo apt-get install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo apt remove vector" + upgrade: "sudo apt-get upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo apt-get install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + add_repo: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + configure: "none" + install: "sudo apt-get install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo apt remove vector" + upgrade: "sudo apt-get upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo apt-get install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "apt" + }, { + title: "DPKG" + description: """ + [Dpkg](https://wiki.debian.org/dpkg) is the software that powers the package management + system in the Debian operating system and its derivatives. Dpkg is used + to install and manage software via `.deb` packages. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "dpkg" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo dpkg -r vector" + upgrade: null + } + variables: { + arch: ["amd64", "arm64", "armhf"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo dpkg -r vector" + upgrade: null + } + variables: { + arch: ["amd64", "arm64", "armhf"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "dpkg" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates the installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current directory. + """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+                    By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/)
+                    since it supports all data types, but it is
+                    recommended to adjust your pipeline as necessary
+                    using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
+                    """
+                name: "aggregator"
+                title: "Aggregator"
+                tutorials: {
+                    installation: [{
+                        title: "Install Vector"
+                        command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+                    }, {
+                        title: "Configure Vector"
+                        command: "none"
+                    }, {
+                        title: "Start Vector"
+                        command: "vector --config ./vector.{config_format}"
+                    }]
+                }
+            }
+        }
+        name: "vector-installer"
+    }, {
+        title: "Docker CLI"
+        description: """
+            The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to
+            the Docker platform. It is used to download, start, and manage Docker
+            images.
+            """
+        archs: ["x86_64", "ARM64"]
+        paths: {
+            bin: "/usr/bin/vector"
+            bin_in_path: true
+            config: "~/vector.{config_format}"
+        }
+        platform_name: "docker"
+        role_implementations: {
+            agent: {
+                commands: {
+                    configure: "none"
+                    install: null
+                    logs: "docker logs -f $(docker ps -aqf \"name=vector\")"
+                    reload: "docker kill --signal=HUP timberio/vector"
+                    restart: "docker restart -f $(docker ps -aqf \"name=vector\")"
+                    start: """
+                        docker run \\
+                        -d \\
+                        -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\
+                        -p 8383:8383 \\{flags}
+                        timberio/vector:{version}-{variant}
+                        """
+                    stop: "docker stop timberio/vector"
+                    top: "vector top"
+                    uninstall: "docker rm timberio/vector timberio/vector"
+                    upgrade: null
+                }
+                title: "Agent"
+                description: """
+                    The agent role is designed to collect all Docker data on
+                    a single host. Vector runs in its own container
+                    interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/)
+                    for logs via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and
+                    metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+                    but it is recommended to adjust your pipeline as
+                    necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
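+
+                    For example, collection can be narrowed to specific containers
+                    (a sketch; the `include_images` option and the image name are
+                    assumptions to illustrate filtering):
+
+                        sources: logs: {
+                            type: "docker_logs"
+                            // Hypothetical filter: only containers from this image.
+                            include_images: ["my-app"]
+                        }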
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }, { + title: "Helm 3" + description: """ + [Helm](https://helm.sh/) is a package manager for Kubernetes that + facilitates the deployment and management of applications and services + on Kubernetes clusters. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: null + } + package_manager_name: "helm" + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as\tnecessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + add_repo: "helm repo add timberio https://packages.timber.io/helm/latest" + helm_values_show: "helm show values timberio/vector-agent" + configure: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + install: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + logs: "kubectl logs --namespace vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart --namespace vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "helm uninstall --namespace vector vector" + upgrade: "helm repo update && helm upgrade --namespace vector vector timberio/vector-agent --reuse-values" + } + name: "agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: "helm repo add timberio https://packages.timber.io/helm/latest" + }, { + title: "Check available Helm chart configuration options" + command: "helm show values timberio/vector-agent" + }, { + title: "Configure Vector" + command: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + }, { + title: "Install Vector" + command: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + }] + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + } + name: "helm3" + }, { + title: "kubectl" + description: """ + The [Kubernetes command-line tool](https://kubernetes.io/docs/reference/kubectl/overview/), kubectl, allows + users to run commands against Kubernetes clusters facilitating + application deployment, scaling, monitoring, and introspection. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: "vector.toml" + } + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as\tnecessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + install: "kubectl apply -k ." + logs: "kubectl logs -n vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart -n vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "kubectl delete -k ." + upgrade: null + verify_config: "kubectl kustomize" + prepare_namespace: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + prepare_kustomization: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + configure: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + } + name: "agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + tutorials: { + installation: [{ + title: "Define Vector's namespace" + command: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + }, { + title: "Prepare kustomization" + command: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + }, { + title: "Configure Vector" + command: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + }, { + title: "Verify the config" + command: "kubectl kustomize" + }, { + title: "Install Vector" + command: "kubectl apply -k ." 
+ }] + } + } + } + name: "kubectl" + }] + family: "Linux" + minimum_supported_version: "4" + name: "debian" + shell: "bash" + } + ubuntu: { + title: "Ubuntu" + description: "[Ubuntu](https://ubuntu.com/) is a Linux distribution based on Debian." + interfaces: [{ + title: "Apt" + description: """ + [Advanced Package Tool](https://en.wikipedia.org/wiki/APT_(software)), or APT, is a free package manager + that handles the installation and removal of software on Debian, + Ubuntu, and other Linux distributions. + + Our APT repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-deb). + """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "apt" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + add_repo: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + configure: "none" + install: "sudo apt-get install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo apt remove vector" + upgrade: "sudo apt-get upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
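+
+                    For example, a `remap` transform can sit between the default
+                    sources and the sink to shape events in flight (a sketch; the
+                    transform name and the VRL program are illustrative):
+
+                        transforms: tidy: {
+                            type:   "remap"
+                            inputs: ["logs"]
+                            // Hypothetical cleanup: drop the host field from each event.
+                            source: "del(.host)"
+                        }
+                        sinks: out: inputs: ["tidy", "host_metrics", "internal_metrics"]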
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo apt-get install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + add_repo: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + configure: "none" + install: "sudo apt-get install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo apt remove vector" + upgrade: "sudo apt-get upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + 'https://repositories.timber.io/public/vector/cfg/setup/bash.deb.sh' \\ + | sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo apt-get install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "apt" + }, { + title: "DPKG" + description: """ + [Dpkg](https://wiki.debian.org/dpkg) is the software that powers the package management + system in the Debian operating system and its derivatives. Dpkg is used + to install and manage software via `.deb` packages. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "dpkg" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo dpkg -r vector" + upgrade: null + } + variables: { + arch: ["amd64", "arm64", "armhf"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: """ + curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\ + \tsudo dpkg -i vector-{version}-{arch}.deb + """ + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo dpkg -r vector" + upgrade: null + } + variables: { + arch: ["amd64", "arm64", "armhf"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+                    By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/)
+                    since it supports all data types, but it is
+                    recommended to adjust your pipeline as necessary
+                    using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
+                    """
+                name: "aggregator"
+                title: "Aggregator"
+                tutorials: {
+                    installation: [{
+                        title: "Install Vector"
+                        command: """
+                            curl --proto '=https' --tlsv1.2 -O https://packages.timber.io/vector/{version}/vector-{version}-{arch}.deb && \\
+                            \tsudo dpkg -i vector-{version}-{arch}.deb
+                            """
+                    }, {
+                        title: "Configure Vector"
+                        command: "none"
+                    }, {
+                        title: "Restart Vector"
+                        command: "sudo systemctl restart vector"
+                    }]
+                }
+            }
+        }
+        name: "dpkg"
+    }, {
+        title: "Vector Installer"
+        description: """
+            The [Vector installer](https://sh.vector.dev) is a simple shell
+            script that facilitates the installation of Vector on a variety of
+            systems. It is an unobtrusive and simple option since it installs the
+            `vector` binary in your current directory.
+            """
+        archs: ["x86_64", "ARM64", "ARMv7"]
+        paths: {
+            bin: "./vector"
+            bin_in_path: false
+            config: "./vector.{config_format}"
+        }
+        role_implementations: {
+            agent: {
+                commands: {
+                    configure: "none"
+                    install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+                    logs: null
+                    reload: "killall -s SIGHUP vector"
+                    restart: null
+                    start: "vector --config ./vector.{config_format}"
+                    stop: null
+                    top: "vector top"
+                    uninstall: "rm -rf ./vector"
+                    upgrade: null
+                }
+                tutorials: {
+                    installation: [{
+                        title: "Install Vector"
+                        command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+                    }, {
+                        title: "Configure Vector"
+                        command: "none"
+                    }, {
+                        title: "Start Vector"
+                        command: "vector --config ./vector.{config_format}"
+                    }]
+                }
+                description: """
+                    The agent role is designed to collect all data on
+                    a single host. Vector runs as a background process
+                    and interfaces with host-level APIs for data
+                    collection. By default, Vector will collect logs
+                    from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's
+                    [`journald` source](https://vector.dev/docs/reference/sources/journald/) and
+                    metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+                    but it is recommended to adjust your pipeline as
+                    necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
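+
+                    For example, the single console sink can be fanned out so the
+                    same events also land on disk (a sketch; the `archive` sink and
+                    its path are placeholders):
+
+                        sinks: {
+                            out: {
+                                type:     "console"
+                                inputs:   ["logs", "host_metrics", "internal_metrics"]
+                                encoding: codec: "json"
+                            }
+                            archive: {
+                                type:   "file"
+                                inputs: ["logs"]
+                                // Hypothetical on-disk copy of the log stream.
+                                path:     "/tmp/vector-%Y-%m-%d.log"
+                                encoding: codec: "ndjson"
+                            }
+                        }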
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+                    By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/)
+                    since it supports all data types, but it is
+                    recommended to adjust your pipeline as necessary
+                    using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
+                    """
+                name: "aggregator"
+                title: "Aggregator"
+                tutorials: {
+                    installation: [{
+                        title: "Install Vector"
+                        command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+                    }, {
+                        title: "Configure Vector"
+                        command: "none"
+                    }, {
+                        title: "Start Vector"
+                        command: "vector --config ./vector.{config_format}"
+                    }]
+                }
+            }
+        }
+        name: "vector-installer"
+    }, {
+        title: "Docker CLI"
+        description: """
+            The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to
+            the Docker platform. It is used to download, start, and manage Docker
+            images.
+            """
+        archs: ["x86_64", "ARM64"]
+        paths: {
+            bin: "/usr/bin/vector"
+            bin_in_path: true
+            config: "~/vector.{config_format}"
+        }
+        platform_name: "docker"
+        role_implementations: {
+            agent: {
+                commands: {
+                    configure: "none"
+                    install: null
+                    logs: "docker logs -f $(docker ps -aqf \"name=vector\")"
+                    reload: "docker kill --signal=HUP timberio/vector"
+                    restart: "docker restart -f $(docker ps -aqf \"name=vector\")"
+                    start: """
+                        docker run \\
+                        -d \\
+                        -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\
+                        -p 8383:8383 \\{flags}
+                        timberio/vector:{version}-{variant}
+                        """
+                    stop: "docker stop timberio/vector"
+                    top: "vector top"
+                    uninstall: "docker rm timberio/vector timberio/vector"
+                    upgrade: null
+                }
+                title: "Agent"
+                description: """
+                    The agent role is designed to collect all Docker data on
+                    a single host. Vector runs in its own container
+                    interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/)
+                    for logs via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and
+                    metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+                    but it is recommended to adjust your pipeline as
+                    necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }, { + title: "Helm 3" + description: """ + [Helm](https://helm.sh/) is a package manager for Kubernetes that + facilitates the deployment and management of applications and services + on Kubernetes clusters. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: null + } + package_manager_name: "helm" + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as\tnecessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + add_repo: "helm repo add timberio https://packages.timber.io/helm/latest" + helm_values_show: "helm show values timberio/vector-agent" + configure: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + install: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + logs: "kubectl logs --namespace vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart --namespace vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "helm uninstall --namespace vector vector" + upgrade: "helm repo update && helm upgrade --namespace vector vector timberio/vector-agent --reuse-values" + } + name: "agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: "helm repo add timberio https://packages.timber.io/helm/latest" + }, { + title: "Check available Helm chart configuration options" + command: "helm show values timberio/vector-agent" + }, { + title: "Configure Vector" + command: """ + cat <<-'VALUES' > values.yaml + # The Vector Kubernetes integration automatically defines a + # kubernetes_logs source that is made available to you. + # You do not need to define a log source. + sinks: + # Adjust as necessary. By default we use the console sink + # to print all data. This allows you to see Vector working. + # https://vector.dev/docs/reference/sinks/ + stdout: + type: console + inputs: ["kubernetes_logs"] + target: "stdout" + encoding: "json" + VALUES + """ + }, { + title: "Install Vector" + command: "helm install --namespace vector --create-namespace vector timberio/vector-agent --values values.yaml" + }] + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + } + name: "helm3" + }, { + title: "kubectl" + description: """ + The [Kubernetes command-line tool](https://kubernetes.io/docs/reference/kubectl/overview/), kubectl, allows + users to run commands against Kubernetes clusters facilitating + application deployment, scaling, monitoring, and introspection. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: null + bin_in_path: null + config: "vector.toml" + } + platform_name: "kubernetes" + role_implementations: { + agent: { + title: "Agent" + description: """ + The agent role is designed to collect all Kubernetes + log data on each Node. Vector runs as a + [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) and tails + logs for the entire Pod, automatically enriching them + with Kubernetes metadata via the + [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/). 
Collection + is handled automatically, and it is intended for you to + adjust your pipeline as\tnecessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + commands: { + install: "kubectl apply -k ." + logs: "kubectl logs -n vector daemonset/vector-agent" + reload: null + restart: "kubectl rollout restart -n vector daemonset/vector-agent" + start: null + stop: null + top: null + uninstall: "kubectl delete -k ." + upgrade: null + verify_config: "kubectl kustomize" + prepare_namespace: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + prepare_kustomization: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + configure: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + } + name: "agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: {} + sinks: { + out: { + type: "console" + inputs: ["kubernetes_logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + tutorials: { + installation: [{ + title: "Define Vector's namespace" + command: "kubectl create namespace --dry-run=client -oyaml vector > namespace.yaml" + }, { + title: "Prepare kustomization" + command: """ + cat <<-'KUSTOMIZATION' > kustomization.yaml + # Override the namespace of all of the resources we manage. + namespace: vector + + bases: + # Include Vector recommended base (from git). + - github.com/timberio/vector/distribution/kubernetes/vector-agent?ref=v0.11 + + images: + # Override the Vector image to avoid use of the sliding tag. + - name: timberio/vector + newName: timberio/vector + newTag: 0.11.X-debian + + resources: + # A namespace to keep the resources at. + - namespace.yaml + + configMapGenerator: + # Provide a custom `ConfigMap` for Vector. + - name: vector-agent-config + files: + - vector-agent.toml + + generatorOptions: + # We do not want a suffix at the `ConfigMap` name. + disableNameSuffixHash: true + KUSTOMIZATION + """ + }, { + title: "Configure Vector" + command: """ + cat <<-'VECTORCFG' > vector-agent.toml + # The Vector Kubernetes integration automatically defines a + # `kubernetes_logs` source that is made available to you. + # You do not need to define a log source. + + {config} + VECTORCFG + """ + }, { + title: "Verify the config" + command: "kubectl kustomize" + }, { + title: "Install Vector" + command: "kubectl apply -k ." 
+                    }]
+                }
+            }
+        }
+        name: "kubectl"
+    }]
+    family: "Linux"
+    minimum_supported_version: "14.04"
+    name: "ubuntu"
+    shell: "bash"
+}
+macos: {
+    title: "macOS"
+    description: """
+        [macOS](https://en.wikipedia.org/wiki/MacOS) is the primary operating system for Apple's
+        Mac computers. It is a certified Unix system based on Apple's
+        Darwin operating system.
+        """
+    interfaces: [{
+        title: "Homebrew"
+        description: """
+            [Homebrew](https://brew.sh/) is a free and open-source package
+            management system that handles software installation and management for
+            Apple's macOS operating system and other supported Linux systems.
+            """
+        archs: ["x86_64", "ARM64", "ARMv7"]
+        package_manager_name: "homebrew"
+        paths: {
+            bin: "/usr/local/bin/vector"
+            bin_in_path: true
+            config: "/etc/vector/vector.{config_format}"
+        }
+        role_implementations: {
+            agent: {
+                commands: {
+                    configure: "none"
+                    install: "brew tap timberio/brew && brew install vector"
+                    logs: "tail -f /usr/local/var/log/vector.log"
+                    reload: "killall -s SIGHUP vector"
+                    restart: "brew services restart vector"
+                    start: "brew services start vector"
+                    stop: "brew services stop vector"
+                    top: "vector top"
+                    uninstall: "brew remove vector"
+                    upgrade: "brew update && brew upgrade vector"
+                }
+                variables: {
+                    config: {
+                        api: {
+                            enabled: true
+                            address: "127.0.0.1:8686"
+                        }
+                        sources: {
+                            logs: {
+                                type: "file"
+                                include: ["/var/log/**/*.log"]
+                            }
+                            host_metrics: {
+                                type: "host_metrics"
+                            }
+                            internal_metrics: {
+                                type: "internal_metrics"
+                            }
+                        }
+                        sinks: {
+                            out: {
+                                type: "console"
+                                inputs: ["logs", "host_metrics", "internal_metrics"]
+                                encoding: {
+                                    codec: "json"
+                                }
+                            }
+                        }
+                    }
+                    config_format: ["toml"]
+                    version: false
+                }
+                description: """
+                    The agent role is designed to collect all data on
+                    a single host. Vector runs as a background process
+                    and interfaces with host-level APIs for data
+                    collection. By default, Vector will collect logs
+                    via Vector's [`file` source](https://vector.dev/docs/reference/sources/file/) and
+                    metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+                    but it is recommended to adjust your pipeline as
+                    necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
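+
+                    For example, metric collection can be limited to a subset of
+                    host collectors (a sketch; the `collectors` values shown are
+                    assumptions based on the `host_metrics` source docs):
+
+                        sources: host_metrics: {
+                            type: "host_metrics"
+                            // Hypothetical narrowing to CPU and memory only.
+                            collectors: ["cpu", "memory"]
+                        }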
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: "brew tap timberio/brew && brew install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "brew services restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "brew tap timberio/brew && brew install vector" + logs: "tail -f /usr/local/var/log/vector.log" + reload: "killall -s SIGHUP vector" + restart: "brew services restart vector" + start: "brew services start vector" + stop: "brew services stop vector" + top: "vector top" + uninstall: "brew remove vector" + upgrade: "brew update && brew upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "brew tap timberio/brew && brew install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "brew services restart vector" + }] + } + } + } + name: "homebrew" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates that installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current direction. + """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. 
By default, Vector will collect logs
+                    via Vector's [`file` source](https://vector.dev/docs/reference/sources/file/) and
+                    metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+                    but it is recommended to adjust your pipeline as
+                    necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
+                    """
+                name: "agent"
+                title: "Agent"
+                variables: {
+                    config: {
+                        api: {
+                            enabled: true
+                            address: "127.0.0.1:8686"
+                        }
+                        sources: {
+                            logs: {
+                                type: "file"
+                                include: ["/var/log/**/*.log"]
+                            }
+                            host_metrics: {
+                                type: "host_metrics"
+                            }
+                            internal_metrics: {
+                                type: "internal_metrics"
+                            }
+                        }
+                        sinks: {
+                            out: {
+                                type: "console"
+                                inputs: ["logs", "host_metrics", "internal_metrics"]
+                                encoding: {
+                                    codec: "json"
+                                }
+                            }
+                        }
+                    }
+                    config_format: ["toml"]
+                    version: false
+                }
+            }
+            sidecar: {
+                commands: {
+                    configure: "none"
+                    install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+                    logs: null
+                    reload: "killall -s SIGHUP vector"
+                    restart: null
+                    start: "vector --config ./vector.{config_format}"
+                    stop: null
+                    top: "vector top"
+                    uninstall: "rm -rf ./vector"
+                    upgrade: null
+                }
+                variables: {
+                    config: {
+                        api: {
+                            enabled: true
+                            address: "127.0.0.1:8686"
+                        }
+                        sources: {
+                            logs: {
+                                type: "file"
+                                include: ["/var/log/my-app*.log"]
+                            }
+                            host_metrics: {
+                                type: "host_metrics"
+                            }
+                            internal_metrics: {
+                                type: "internal_metrics"
+                            }
+                        }
+                        sinks: {
+                            out: {
+                                type: "console"
+                                inputs: ["logs", "host_metrics", "internal_metrics"]
+                                encoding: {
+                                    codec: "json"
+                                }
+                            }
+                        }
+                    }
+                    config_format: ["toml"]
+                    version: false
+                }
+                description: """
+                    The sidecar role is designed to collect data from
+                    a single process on the same host. By default, we
+                    recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/)
+                    to tail the logs for that individual process, but
+                    you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/),
+                    [`socket` source](https://vector.dev/docs/reference/sources/socket/), or
+                    [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend
+                    adjusting your pipeline as necessary using Vector's
+                    [sources](https://vector.dev/docs/reference/sources/),
+                    [transforms](https://vector.dev/docs/reference/transforms/), and
+                    [sinks](https://vector.dev/docs/reference/sinks/).
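+
+                    For example, with the `stdin` source the watched process can
+                    pipe its output straight into Vector (a sketch):
+
+                        sources: logs: type: "stdin"
+
+                    The process would then be launched as something like
+                    `my-app | vector --config ./vector.toml`, where `my-app` is a
+                    placeholder for the actual command.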
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. 
Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for log via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. 
By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
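+
+ For example, a sketch of the listening `vector`
+ source (the port is arbitrary, but must match the
+ `-p 9000:9000` flag shown above):
+
+ ```cue
+ sources: vector: {
+     type:    "vector"
+     address: "0.0.0.0:9000"
+ }
+ ```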
+ """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }] + family: "macOS" + minimum_supported_version: "10.5" + name: "macos" + shell: "bash" + } + "amazon-linux": { + title: "Amazon Linux" + description: """ + The [Amazon Linux AMI](https://aws.amazon.com/amazon-linux-ami/) is a supported and + maintained Linux image provided by Amazon Web Services for use on + Amazon Elastic Compute Cloud (Amazon EC2). It is designed to + provide a stable, secure, and high performance execution + environment for applications running on Amazon EC2. + """ + interfaces: [{ + title: "YUM" + description: """ + The [Yellowdog Updater](https://en.wikipedia.org/wiki/Yum_(software)), Modified (YUM) is a free and + open-source command-line package-manager for Linux operating system + using the RPM Package Manager. + + Our Yum repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-rpm). + """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "yum" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + add_repo: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + configure: "none" + install: "sudo yum install vector" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo yum remove vector" + upgrade: "sudo yum upgrade vector" + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Add the Vector repo" + command: """ + curl -1sLf \\ + \t'https://repositories.timber.io/public/vector/cfg/setup/bash.rpm.sh' \\ + \t| sudo -E bash + """ + }, { + title: "Install Vector" + command: "sudo yum install vector" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + } + name: "yum" + }, { + title: "RPM" + description: """ + [RPM Package Manager](https://rpm.org/) is a free and open-source package + management system for installing and managing software on Fedra, CentOS, + OpenSUSE, OpenMandriva, Red Hat Enterprise Linux, and other + related Linux-based systems. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + package_manager_name: "rpm" + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "/etc/vector/vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's + [`journald` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Restart Vector" + command: "sudo systemctl restart vector" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm" + logs: "sudo journalctl -fu vector" + reload: "systemctl kill -s HUP --kill-who=main vector.service" + restart: "sudo systemctl restart vector" + start: "sudo systemctl start vector" + stop: "sudo systemctl stop vector" + top: "vector top" + uninstall: "sudo rpm -e vector" + upgrade: null + } + variables: { + arch: ["x86_64", "aarch64", "armv7"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we
+ recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/)
+ since it supports all data types, but it is
+ recommended to adjust your pipeline as necessary
+ using Vector's [sources](https://vector.dev/docs/reference/sources/),
+ [transforms](https://vector.dev/docs/reference/transforms/), and
+ [sinks](https://vector.dev/docs/reference/sinks/).
+ """
+ name: "aggregator"
+ title: "Aggregator"
+ tutorials: {
+ installation: [{
+ title: "Install Vector"
+ command: "sudo rpm -i https://packages.timber.io/vector/{version}/vector-{version}-1.{arch}.rpm"
+ }, {
+ title: "Configure Vector"
+ command: "none"
+ }, {
+ title: "Restart Vector"
+ command: "sudo systemctl restart vector"
+ }]
+ }
+ }
+ }
+ name: "rpm"
+ }, {
+ title: "Vector Installer"
+ description: """
+ The [Vector installer](https://sh.vector.dev) is a simple shell
+ script that facilitates the installation of Vector on a variety of
+ systems. It is an unobtrusive and simple option since it installs the
+ `vector` binary in your current directory.
+ """
+ archs: ["x86_64", "ARM64", "ARMv7"]
+ paths: {
+ bin: "./vector"
+ bin_in_path: false
+ config: "./vector.{config_format}"
+ }
+ role_implementations: {
+ agent: {
+ commands: {
+ configure: "none"
+ install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+ logs: null
+ reload: "killall -s SIGHUP vector"
+ restart: null
+ start: "vector --config ./vector.{config_format}"
+ stop: null
+ top: "vector top"
+ uninstall: "rm -rf ./vector"
+ upgrade: null
+ }
+ tutorials: {
+ installation: [{
+ title: "Install Vector"
+ command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+ }, {
+ title: "Configure Vector"
+ command: "none"
+ }, {
+ title: "Start Vector"
+ command: "vector --config ./vector.{config_format}"
+ }]
+ }
+ description: """
+ The agent role is designed to collect all data on
+ a single host. Vector runs as a background process
+ and interfaces with host-level APIs for data
+ collection. By default, Vector will collect logs
+ from [Journald](https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html) via Vector's
+ [`journald` source](https://vector.dev/docs/reference/sources/journald/) and
+ metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+ but it is recommended to adjust your pipeline as
+ necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+ [transforms](https://vector.dev/docs/reference/transforms/), and
+ [sinks](https://vector.dev/docs/reference/sinks/).
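+
+ As a sketch of one such adjustment, a hypothetical
+ `remap` transform could tag events before they
+ reach a sink:
+
+ ```cue
+ transforms: tag: {
+     type:   "remap"
+     inputs: ["logs"]
+     source: ".role = \"agent\""
+ }
+ ```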
+ """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "journald" + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. 
+ By default, we
+ recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/)
+ since it supports all data types, but it is
+ recommended to adjust your pipeline as necessary
+ using Vector's [sources](https://vector.dev/docs/reference/sources/),
+ [transforms](https://vector.dev/docs/reference/transforms/), and
+ [sinks](https://vector.dev/docs/reference/sinks/).
+ """
+ name: "aggregator"
+ title: "Aggregator"
+ tutorials: {
+ installation: [{
+ title: "Install Vector"
+ command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh"
+ }, {
+ title: "Configure Vector"
+ command: "none"
+ }, {
+ title: "Start Vector"
+ command: "vector --config ./vector.{config_format}"
+ }]
+ }
+ }
+ }
+ name: "vector-installer"
+ }, {
+ title: "Docker CLI"
+ description: """
+ The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to
+ the Docker platform. It is used to download, start, and manage Docker
+ images.
+ """
+ archs: ["x86_64", "ARM64"]
+ paths: {
+ bin: "/usr/bin/vector"
+ bin_in_path: true
+ config: "~/vector.{config_format}"
+ }
+ platform_name: "docker"
+ role_implementations: {
+ agent: {
+ commands: {
+ configure: "none"
+ install: null
+ logs: "docker logs -f $(docker ps -aqf \"name=vector\")"
+ reload: "docker kill --signal=HUP timberio/vector"
+ restart: "docker restart -f $(docker ps -aqf \"name=vector\")"
+ start: """
+ docker run \\
+ -d \\
+ -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\
+ -p 8383:8383 \\{flags}
+ timberio/vector:{version}-{variant}
+ """
+ stop: "docker stop timberio/vector"
+ top: "vector top"
+ uninstall: "docker rm timberio/vector timberio/vector"
+ upgrade: null
+ }
+ title: "Agent"
+ description: """
+ The agent role is designed to collect all Docker data on
+ a single host. Vector runs in its own container
+ interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/)
+ for logs via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and
+ metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/),
+ but it is recommended to adjust your pipeline as
+ necessary using Vector's [sources](https://vector.dev/docs/reference/sources/),
+ [transforms](https://vector.dev/docs/reference/transforms/), and
+ [sinks](https://vector.dev/docs/reference/sinks/).
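+
+ For example, collection can be narrowed to specific
+ containers (the container name is a placeholder):
+
+ ```cue
+ sources: logs: {
+     type:               "docker_logs"
+     include_containers: ["my-app"]
+ }
+ ```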
+ """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }] + family: "Linux" + minimum_supported_version: "1" + name: "amazon-linux" + shell: "bash" + } + windows: { + title: "Windows" + description: """ + [Microsoft Windows](https://www.microsoft.com/en-us/windows) is an operating system + developed and sold by Microsoft. + """ + interfaces: [{ + title: "MSI (Windows Installer)" + description: """ + MSI refers to the file format and command line utility for + the [Windows Installer](https://en.wikipedia.org/wiki/Windows_Installer). Windows Installer + (previously known as Microsoft Installer) is an interface for Microsoft + Windows that is used to install and manage software on Windows systems. 
+ """ + archs: ["x86_64"] + package_manager_name: "msi" + paths: { + bin: "C:\\Program Files\\Vector\\bin\\vector" + bin_in_path: true + config: "C:\\Program Files\\Vector\\config\\vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: """ + cat <<-VECTORCFG > C:\\Program Files\\Vector\\config\\vector.{config_format} + {config} + VECTORCFG + """ + install: """ + powershell Invoke-WebRequest https://packages.timber.io/vector/{version}/vector-{arch}.msi -OutFile vector-{version}-{arch}.msi && \\ + \tmsiexec /i vector-{version}-{arch}.msi /quiet + """ + logs: null + reload: null + restart: null + start: "C:\\Program Files\\Vector\\bin\\vector --config C:\\Program Files\\Vector\\config\\vector.{config_format}" + stop: null + top: "vector top" + uninstall: "msiexec /x {7FAD6F97-D84E-42CC-A600-5F4EC3460FF5} /quiet" + upgrade: null + } + variables: { + arch: ["x64"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/**/*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + via Vector's [`file` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "agent" + title: "Agent" + tutorials: { + installation: [{ + title: "Install Vector" + command: """ + powershell Invoke-WebRequest https://packages.timber.io/vector/{version}/vector-{arch}.msi -OutFile vector-{version}-{arch}.msi && \\ + \tmsiexec /i vector-{version}-{arch}.msi /quiet + """ + }, { + title: "Configure Vector" + command: """ + cat <<-VECTORCFG > C:\\Program Files\\Vector\\config\\vector.{config_format} + {config} + VECTORCFG + """ + }, { + title: "Start Vector" + command: "C:\\Program Files\\Vector\\bin\\vector --config C:\\Program Files\\Vector\\config\\vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: """ + cat <<-VECTORCFG > C:\\Program Files\\Vector\\config\\vector.{config_format} + {config} + VECTORCFG + """ + install: """ + powershell Invoke-WebRequest https://packages.timber.io/vector/{version}/vector-{arch}.msi -OutFile vector-{version}-{arch}.msi && \\ + \tmsiexec /i vector-{version}-{arch}.msi /quiet + """ + logs: null + reload: null + restart: null + start: "C:\\Program Files\\Vector\\bin\\vector --config C:\\Program Files\\Vector\\config\\vector.{config_format}" + stop: null + top: "vector top" + uninstall: "msiexec /x {7FAD6F97-D84E-42CC-A600-5F4EC3460FF5} /quiet" + upgrade: null + } + variables: { + arch: ["x64"] + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: """ + powershell Invoke-WebRequest https://packages.timber.io/vector/{version}/vector-{arch}.msi -OutFile vector-{version}-{arch}.msi && \\ + \tmsiexec /i vector-{version}-{arch}.msi /quiet + """ + }, { + title: "Configure Vector" + command: """ + cat <<-VECTORCFG > C:\\Program Files\\Vector\\config\\vector.{config_format} + {config} + VECTORCFG + """ + }, { + title: "Start Vector" + command: "C:\\Program Files\\Vector\\bin\\vector --config C:\\Program Files\\Vector\\config\\vector.{config_format}" + }] + } + } + } + name: "msi" + }, { + title: "Vector Installer" + description: """ + The [Vector installer](https://sh.vector.dev) is a simple shell + script that facilitates that installation of Vector on a variety of + systems. It is an unobtrusive and simple option since it installs the + `vector` binary in your current direction. 
+ """ + archs: ["x86_64", "ARM64", "ARMv7"] + paths: { + bin: "./vector" + bin_in_path: false + config: "./vector.{config_format}" + } + role_implementations: { + agent: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + description: """ + The agent role is designed to collect all data on + a single host. Vector runs as a background process + and interfaces with a host-level APIs for data + collection. By default, Vector will collect logs + via Vector's [`file` source](https://vector.dev/docs/reference/sources/journald/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + title: "Agent" + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/**/*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + } + sidecar: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + aggregator: { + commands: { + configure: "none" + install: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + logs: null + reload: "killall -s SIGHUP vector" + restart: null + start: "vector --config ./vector.{config_format}" + stop: null + top: "vector top" + uninstall: "rm -rf ./vector" + upgrade: null + } + variables: { + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + version: false + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Install Vector" + command: "curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | sh" + }, { + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: "vector --config ./vector.{config_format}" + }] + } + } + } + name: "vector-installer" + }, { + title: "Docker CLI" + description: """ + The [Docker CLI](https://docs.docker.com/engine/reference/commandline/cli/) is the command line interface to + the Docker platform. It is used to download, start, and manage Docker + images. + """ + archs: ["x86_64", "ARM64"] + paths: { + bin: "/usr/bin/vector" + bin_in_path: true + config: "~/vector.{config_format}" + } + platform_name: "docker" + role_implementations: { + agent: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + title: "Agent" + description: """ + The agent role is designed to collect all Docker data on + a single host. 
Vector runs in its own container + interfacing with the [Docker Engine API](https://docs.docker.com/engine/api/) + for log via the [`docker_logs` source](https://vector.dev/docs/reference/sources/docker_logs/) and + metrics via the [`host_metrics` source](https://vector.dev/docs/reference/sources/host_metrics/), + but it is recommended to adjust your pipeline as + necessary using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "agent" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + } + sidecar: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + logs: { + type: "file" + include: ["/var/log/my-app*.log"] + } + host_metrics: { + type: "host_metrics" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["logs", "host_metrics", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The sidecar role is designed to collect data from + a single process on the same host. 
By default, we + recommend using the [`file` source](https://vector.dev/docs/reference/sources/file/) + to tail the logs for that individual process, but + you could use the [`stdin` source](https://vector.dev/docs/reference/sources/stdin/), + [`socket` source](https://vector.dev/docs/reference/sources/socket/), or + [`http` source](https://vector.dev/docs/reference/sources/http/). We recommend + adjusting your pipeline as necessary using Vector's + [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). + """ + name: "sidecar" + title: "Sidecar" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + aggregator: { + commands: { + configure: "none" + install: null + logs: "docker logs -f $(docker ps -aqf \"name=vector\")" + reload: "docker kill --signal=HUP timberio/vector" + restart: "docker restart -f $(docker ps -aqf \"name=vector\")" + start: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + stop: "docker stop timberio/vector" + top: "vector top" + uninstall: "docker rm timberio/vector timberio/vector" + upgrade: null + } + variables: { + flags: { + sources: { + aws_kinesis_firehose: """ + + -p 443:443 \\ + """ + file: """ + + -v /var/log:/var/log \\ + """ + docker: """ + + -v /var/run/docker.sock:/var/run/docker.sock \\ + """ + http: """ + + -p 80:80 \\ + """ + heroku_logs: """ + + -p 80:80 \\ + """ + socket: """ + + -p 9000:9000 \\ + """ + splunk_hec: """ + + -p 8080:8080 \\ + """ + statsd: """ + + -p 8125:8125 \\ + """ + syslog: """ + + -p 514:514 \\ + """ + vector: """ + + -p 9000:9000 \\ + """ + } + } + config: { + api: { + enabled: true + address: "127.0.0.1:8686" + } + sources: { + vector: { + type: "vector" + } + internal_metrics: { + type: "internal_metrics" + } + } + sinks: { + out: { + type: "console" + inputs: ["vector", "internal_metrics"] + encoding: { + codec: "json" + } + } + } + } + config_format: ["toml"] + variant: ["debian", "alpine", "distroless"] + version: true + } + description: """ + The aggregator role is designed to receive and + process data from multiple upstream agents. + Typically these are other Vector agents, but it + could be anything, including non-Vector agents. + By default, we recommend the [`vector` source](https://vector.dev/docs/reference/sources/vector/) + since it supports all data types, but it is + recommended to adjust your pipeline as necessary + using Vector's [sources](https://vector.dev/docs/reference/sources/), + [transforms](https://vector.dev/docs/reference/transforms/), and + [sinks](https://vector.dev/docs/reference/sinks/). 
+ """ + name: "aggregator" + title: "Aggregator" + tutorials: { + installation: [{ + title: "Configure Vector" + command: "none" + }, { + title: "Start Vector" + command: """ + docker run \\ + -d \\ + -v ~/vector.{config_format}:/etc/vector/vector.toml:ro \\ + -p 8383:8383 \\{flags} + timberio/vector:{version}-{variant} + """ + }] + } + } + } + name: "docker-cli" + }] + family: "Windows" + minimum_supported_version: "7" + name: "windows" + shell: "powershell" + } + } + package_managers: { + dpkg: { + title: "DPKG" + description: """ + [Dpkg](https://wiki.debian.org/dpkg) is the software that powers the package management + system in the Debian operating system and its derivatives. Dpkg is used + to install and manage software via `.deb` packages. + """ + name: "dpkg" + } + apt: { + title: "APT" + description: """ + [Advanced Package Tool](https://en.wikipedia.org/wiki/APT_(software)), or APT, is a free package manager + that handles the installation and removal of software on Debian, + Ubuntu, and other Linux distributions. + + Our APT repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-deb). + """ + name: "apt" + } + homebrew: { + title: "Homebrew" + description: """ + [Homebrew](https://brew.sh/) is a free and open-source package + management system that manage software installation and management for + Apple's macOS operating system and other supported Linux systems. + """ + name: "homebrew" + } + rpm: { + title: "RPM" + description: """ + [RPM Package Manager](https://rpm.org/) is a free and open-source package + management system for installing and managing software on Fedra, CentOS, + OpenSUSE, OpenMandriva, Red Hat Enterprise Linux, and other + related Linux-based systems. + """ + name: "rpm" + } + msi: { + title: "MSI" + description: """ + MSI refers to the file format and command line utility for + the [Windows Installer](https://en.wikipedia.org/wiki/Windows_Installer). Windows Installer + (previously known as Microsoft Installer) is an interface for Microsoft + Windows that is used to install and manage software on Windows systems. + """ + name: "msi" + } + helm: { + title: "Helm" + description: """ + [Helm](https://helm.sh/) is a package manager for Kubernetes that + facilitates the deployment and management of applications and services + on Kubernetes clusters. + """ + name: "helm" + } + nix: { + title: "Nix" + description: """ + [Nix](https://nixos.org/nix/) is a cross-platform package manager + implemented on a functional deployment model where software is + installed into unique directories generated through + cryptographic hashes, it is also the name of the programming + language. + """ + name: "nix" + } + yum: { + title: "YUM" + description: """ + The [Yellowdog Updater](https://en.wikipedia.org/wiki/Yum_(software)), Modified (YUM) is a free and + open-source command-line package-manager for Linux operating system + using the RPM Package Manager. + + Our Yum repositories are provided by + [Cloudsmith](https://cloudsmith.io/~timber/repos/vector/packages/) and you + can find [instructions for manually adding + the repositories](https://cloudsmith.io/~timber/repos/vector/setup/#formats-rpm). 
+ """ + name: "yum" + } + } + platforms: { + docker: { + title: "Docker" + description: """ + [Docker](https://www.docker.com/) is an open platform for developing, shipping, and running + applications and services. Docker enables you to separate your services from + your infrastructure so you can ship quickly. With Docker, you can manage your + infrastructure in the same ways you manage your services. By taking advantage + of Docker's methodologies for shipping, testing, and deploying code quickly, + you can significantly reduce the delay between writing code and running it in + production. + """ + how_it_works: {} + minimum_supported_version: null + name: "docker" + } + kubernetes: { + title: "Kubernetes" + description: """ + [Kubernetes](https://kubernetes.io), also known as k8s, is an + open-source container-orchestration system for automating + application deployment, scaling, and management. + """ + minimum_supported_version: "1.14" + how_it_works: { + metrics: { + #Subsection: { + title: string + body: string + } + name: "metrics" + title: "Metrics" + body: """ + Our Helm chart deployments provide quality of life around setup and maintenance of + metrics pipelines in Kubernetes. Each of the Helm charts provide an `internal_metrics` + source and `prometheus` sink out of the box. Agent deployments also expose `host_metrics` + via the same `prometheus` sink. + + Charts come with options to enable Prometheus integration via annotations or Prometheus Operator + integration via PodMonitor. Thus, the Prometheus node_exporter agent is not required when the `host_metrics` source is + enabled. + """ + } + enrichment: { + #Subsection: { + title: string + body: string + } + name: "enrichment" + title: "Enrichment" + body: """ + Vector will enrich data with Kubernetes context. A comprehensive + list of fields can be found in the + [`kubernetes_logs` source output docs](https://vector.dev/docs/reference/sources/kubernetes_logs/#output). + """ + } + filtering: { + #Subsection: { + title: string + body: string + } + name: "filtering" + title: "Filtering" + body: """ + Vector provides rich filtering options for Kubernetes log collection: + + * Built-in [`Pod`](#pod-exclusion) and [`container`](#container-exclusion) + exclusion rules. + * The `exclude_paths_glob_patterns` option allows you to exclude + Kuberenetes log files by the file name and path. + * The `extra_field_selector` option specifies the field selector to + filter Pods with, to be used in addition to the built-in `Node` filter. + * The `extra_label_selector` option specifies the label selector to + filter `Pod`s with, to be used in addition to the [built-in + `vector.dev/exclude` filter](#pod-exclusion). + """ + } + pod_exclusion: { + #Subsection: { + title: string + body: string + } + name: "pod_exclusion" + title: "Pod exclusion" + body: """ + By default, the [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + will skip logs from the `Pod`s that have a `vector.dev/exclude: "true"` *label*. + You can configure additional exclusion rules via label or field selectors, + see [the available options](https://vector.dev/docs/reference/sources/kubernetes_logs/#configuration). + """ + } + container_exclusion: { + #Subsection: { + title: string + body: string + } + name: "container_exclusion" + title: "Container exclusion" + body: """ + The [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + can skip the logs from the individual `container`s of a particular + `Pod`. 
Add an *annotation* `vector.dev/exclude-containers` to the
+ `Pod`, and enumerate the `name`s of all the `container`s to exclude in
+ the value of the annotation like so:
+
+ ```
+ vector.dev/exclude-containers: "container1,container2"
+ ```
+
+ This annotation will make Vector skip logs originating from
+ `container1` and `container2` of the `Pod` marked with the annotation,
+ while logs from other `container`s in the `Pod` will still be
+ collected.
+ """
+ }
+ kubernetes_api_communication: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "kubernetes_api_communication"
+ title: "Kubernetes API communication"
+ body: """
+ Vector communicates with the Kubernetes API to enrich the data it collects with
+ Kubernetes context. Therefore, Vector must have access to communicate with the
+ [Kubernetes API server](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). If Vector is running in
+ a Kubernetes cluster then Vector will connect to that cluster using the
+ [Kubernetes provided access information](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod).
+
+ In addition to access, Vector implements proper desync handling to ensure
+ communication is safe and reliable. This ensures that Vector will not overwhelm
+ the Kubernetes API or compromise its stability.
+ """
+ }
+ partial_message_merging: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "partial_message_merging"
+ title: "Partial message merging"
+ body: """
+ Vector, by default, will merge partial messages that are
+ split due to the Docker size limit. For everything else, it
+ is recommended to use the [`reduce`
+ transform](https://vector.dev/docs/reference/transforms/reduce/) which offers
+ the ability to handle custom merging of things like
+ stacktraces.
+ """
+ }
+ pod_removal: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "pod_removal"
+ title: "Pod removal"
+ body: """
+ To ensure all data is collected, Vector will continue to collect logs from the
+ `Pod` for some time after its removal. This ensures that Vector obtains some of
+ the most important data, such as crash details.
+ """
+ }
+ resource_limits: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "resource_limits"
+ title: "Resource limits"
+ body: "Vector recommends the following resource limits."
+ sub_sections: [{
+ title: "Agent resource limits"
+ body: """
+ If you deploy Vector as an agent (collecting data for each of your
+ Nodes), then we recommend the following limits:
+
+ ```yaml
+ resources:
+   requests:
+     memory: "64Mi"
+     cpu: "500m"
+   limits:
+     memory: "1024Mi"
+     cpu: "6000m"
+ ```
+
+ **As with all Kubernetes resource limit recommendations, use these
+ as a reference point and adjust as necessary. If your configured
+ Vector pipeline is complex, you may need more resources. If you
+ have a simple pipeline, you may need less.**
+ """
+ }]
+ }
+ state_management: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "state_management"
+ title: "State management"
+ body: null
+ sub_sections: [{
+ title: "Agent state management"
+ body: """
+ For the agent role, Vector stores its state in a host-mapped directory with a static
+ path, so if it is redeployed it will continue from where it was interrupted.
+ """
+ }]
+ }
+ testing_and_reliability: {
+ #Subsection: {
+ title: string
+ body: string
+ }
+ name: "testing_and_reliability"
+ title: "Testing & reliability"
+ body: """
+ Vector is tested extensively against Kubernetes.
In addition to Kubernetes + being Vector's most popular installation method, Vector implements a + comprehensive end-to-end test suite for all minor Kubernetes versions starting + with `1.14`. + """ + } + state: { + #Subsection: { + title: string + body: string + } + name: "state" + title: "State" + body: "This component is stateless, meaning its behavior is consistent across each input." + } + checkpointing: { + #Subsection: { + title: string + body: string + } + name: "checkpointing" + title: "Checkpointing" + body: """ + Vector checkpoints the current read position after each + successful read. This ensures that Vector resumes where it left + off if restarted, preventing data from being read twice. The + checkpoint positions are stored in the data directory which is + specified via the global `data_dir` option, but can be overridden + via the `data_dir` option in the file source directly. + """ + } + kubernetes_api_access_control: { + #Subsection: { + title: string + body: string + } + name: "kubernetes_api_access_control" + title: "Kubernetes API access control" + body: """ + Vector requires access to the Kubernetes API. + Specifically, the [`kubernetes_logs` source](https://vector.dev/docs/reference/sources/kubernetes_logs/) + uses the `/api/v1/pods` endpoint to "watch" the pods from + all namespaces. + + Modern Kubernetes clusters run with an RBAC (role-based access control) + scheme. RBAC-enabled clusters require some configuration to grant Vector + the authorization to access the Kubernetes API endpoints. As RBAC is + currently the standard way of controlling access to the Kubernetes API, + we ship the necessary configuration out of the box: see `ClusterRole`, + `ClusterRoleBinding` and a `ServiceAccount` in our `kubectl` YAML + config, and the `rbac` configuration in the Helm chart. (A minimal + sketch of these objects follows below.) + + If your cluster doesn't use any access control scheme and doesn't + restrict access to the Kubernetes API, you don't need to do any extra + configuration - Vector will just work. + + Clusters using the legacy ABAC scheme are not officially supported + (although Vector might work if you configure access properly) - + we encourage switching to RBAC. If you use a custom access control + scheme - make sure the Vector `Pod`/`ServiceAccount` is granted access to + the `/api/v1/pods` resource. + """ + } + context: { + #Subsection: { + title: string + body: string + } + name: "context" + title: "Context" + body: """ + By default, the `kubernetes_logs` source will augment events with helpful + context keys as shown in the "Output" section. + """ + } + } + name: "kubernetes" + } + } + roles: { + aggregator: { + name: "aggregator" + title: "Aggregator" + description: """ + The aggregator role is designed for central processing, collecting data from + multiple upstream sources and performing cross-host aggregation and analysis. + + For Vector, this role should be reserved for exactly that: cross-host aggregation + and analysis. Vector is unique in that it can serve as both an agent + and an aggregator. This makes it possible to distribute processing along the edge + (recommended). We highly recommend pushing processing to the edge when possible + since it is more efficient and easier to manage. + """ + sub_roles: {} + } + agent: { + name: "agent" + title: "Agent" + sub_roles: { + daemon: { + name: "daemon" + title: "Daemon" + description: """ + The daemon role is designed to collect _all_ data on a single host. This is + the recommended role for data collection since it is the most efficient use + of host resources.
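+ For the RBAC setup described above, a minimal sketch of the three objects
+ (`ServiceAccount`, `ClusterRole`, `ClusterRoleBinding`); the object names
+ and namespace are illustrative, not the shipped configuration:
+
+ ```yaml
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+   name: vector
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+   name: vector
+ rules:
+   - apiGroups: [""]
+     resources: ["pods"]
+     verbs: ["list", "watch"]  # backs the /api/v1/pods watch
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+   name: vector
+ roleRef:
+   apiGroup: rbac.authorization.k8s.io
+   kind: ClusterRole
+   name: vector
+ subjects:
+   - kind: ServiceAccount
+     name: vector
+     namespace: default
+ ```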
Vector implements a directed acyclic graph topology model, + enabling the collection and processing from multiple services. + """ + } + sidecar: { + name: "sidecar" + title: "Sidecar" + description: """ + The sidecar role couples Vector with each service, focused on data collection + for that individual service only. While the daemon role is recommended, the + sidecar role is beneficial when you want to shift responsibility of data + collection to the service owner. And, in some cases, it can be simpler to + manage. + """ + } + } + } + } +} +process: { + #ExitCode: { + code: int + description: string + } + #ExitCodes: {} + #Signal: { + description: string + name: string + } + #Signals: {} + exit_codes: { + "0": { + code: 0 + description: "Exited successfully." + } + "1": { + code: 1 + description: "Exited with a generic error." + } + "78": { + code: 78 + description: "Configuration is invalid." + } + } + signals: { + SIGHUP: { + description: "Reloads configuration on the fly." + name: "SIGHUP" + } + SIGTERM: { + description: "Initiates graceful shutdown process." + name: "SIGTERM" + } + } +} +releases: { + #Commit: { + author: string + breaking_change: bool + date: =~"^\\d{4}-\\d{2}-\\d{2}" + description: string + deletions_count: >=0 & int + files_count: >=0 & int + insertions_count: >=0 & int + pr_number: >=0 & int | null + scopes: [] + sha: =~"^[a-z0-9]{40}$" + type: "chore" | "docs" | "enhancement" | "feat" | "fix" | "perf" | "status" + } + #CommitSha: =~"^[a-z0-9]{40}$" + "0.6.0": { + date: "2019-12-12" + codename: "The Long Stretch" + whats_next: [] + commits: [{ + sha: "15b44d04a06c91d5e0d1017b251c32ac165f2bd6" + date: "2019-10-10 15:01:52 +0000" + description: "Push docker images so that `latest` tags are last" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "4bc7696077e691f59811e8b1e078f1b029fe63a6" + date: "2019-10-10 15:19:21 +0000" + description: "Print grease command output" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0f5266193c6ae8d7d47907c906e34598e36f2057" + date: "2019-10-11 09:58:21 +0000" + description: "Do not release Github or Homebrew on nightly" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 0 + deletions_count: 7 + }, { + sha: "1e1d66e04722841e3e0dc9b6d7d85c75379d1caf" + date: "2019-10-11 09:08:43 +0000" + description: "Make global options actually use default" + pr_number: 1013 + scopes: ["cli"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 6 + insertions_count: 56 + deletions_count: 40 + }, { + sha: "a7f7ffa879cd310beca498a600537707b7aee896" + date: "2019-10-11 10:23:18 +0000" + description: "Add relevant when details to config spec" + pr_number: 1016 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 17 + insertions_count: 74 + deletions_count: 2 + }, { + sha: "1f0c52bcb931bd2e10fa09557e343af50513e166" + date: "2019-10-11 12:26:22 +0000" + description: "List out component options as linkable sections" + pr_number: 1019 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 95 + insertions_count: 3115 + deletions_count: 3683 + }, { + sha: "5510b176ce0645d9893ea0e92ac2f73d58515e38" + date:
"2019-10-14 02:13:53 +0000" + description: "Add ca certificates for docker image" + pr_number: 1014 + scopes: ["docker platform"] + type: "fix" + breaking_change: false + author: "Lincoln Lee" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "d4aac2e13c8c3f285cfeb95a6c22695fe07cb18e" + date: "2019-10-13 18:50:02 +0000" + description: "Further improve options documentation for each component" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 122 + insertions_count: 3957 + deletions_count: 3593 + }, { + sha: "21506409f8bf1311dfb4cd7ce8539d049dd4a5cd" + date: "2019-10-13 18:53:10 +0000" + description: "Remove superflous tags in config examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 42 + insertions_count: 458 + deletions_count: 456 + }, { + sha: "aa02c432cca22a9fd8f7425c839156f2613e3e7b" + date: "2019-10-13 19:47:18 +0000" + description: "Dont repeat default value in configuration examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 45 + insertions_count: 468 + deletions_count: 480 + }, { + sha: "d04a3034e3a6ea233be44ddaf59e07c6340d5824" + date: "2019-10-14 15:10:55 +0000" + description: "Initial `datadog_metrics` implementation" + pr_number: 967 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Alexey Suslov" + files_count: 16 + insertions_count: 1085 + deletions_count: 1 + }, { + sha: "1378575334e0032de645c8277683f73cf640eb97" + date: "2019-10-15 01:43:09 +0000" + description: "Remove debian cache to reduce image size" + pr_number: 1028 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lincoln Lee" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c1b36be946a2103a6c5eff77e288f32898a3bbe3" + date: "2019-10-13 19:49:38 +0000" + description: "Dont label unit in config examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 20 + insertions_count: 80 + deletions_count: 76 + }, { + sha: "571e1390bd4a5455a5b1305ace8fd1724a761ddd" + date: "2019-10-14 19:25:25 +0000" + description: "Add back section references to option descriptions" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 45 + insertions_count: 348 + deletions_count: 334 + }, { + sha: "22efd48c90d91c9fa9a4d102e54ffb3d869945f3" + date: "2019-10-15 12:31:14 +0000" + description: "Ensure log_to_metric tags option shows in example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "5dd167a462930da589f842a366334d65be17d185" + date: "2019-10-15 12:32:52 +0000" + description: "Fix metrics examples syntax error" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "f804cebad4ed97f0da105effbe72b593a846ff9d" + date: "2019-10-15 12:36:11 +0000" + description: "Fix log data model" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "a3c7c752e3fec7d3c5d84d4452e1243b263a3ae8" + date: "2019-10-16 15:30:34 +0000" + description: "Add `commit_interval_ms` option" + pr_number: 944 + scopes: 
["kafka source"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 17 + deletions_count: 5 + }, { + sha: "8aaa22524c13a184a8ce0c8eeaa744d556ed4841" + date: "2019-10-16 19:19:15 +0000" + description: "Fix typos" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "20bc1a29af0ad4cab9f86482873e942627d366c2" + date: "2019-10-17 14:38:27 +0000" + description: "Put buffering tests behind `leveldb` feature" + pr_number: 1046 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "22fd9ef6f07b4372512185270b729ad0fd21b49c" + date: "2019-10-17 15:45:52 +0000" + description: "Update `tower-limit` to `v0.1.1`" + pr_number: 1018 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "32ed04fb529fcb6a10dfed101dff04447357cf13" + date: "2019-10-17 22:47:58 +0000" + description: "Resolve inability to shutdown Vector when std…" + pr_number: 960 + scopes: ["stdin source"] + type: "fix" + breaking_change: false + author: "AlyHKafoury" + files_count: 1 + insertions_count: 39 + deletions_count: 17 + }, { + sha: "3e8c906e791505732cea3608fbac9c1878a141bd" + date: "2019-10-17 18:41:54 +0000" + description: "Add address and path to the syslog source example config" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "fe26627b13797465d7a94a7ea1e63a7266bf7d42" + date: "2019-10-18 13:04:52 +0000" + description: "Bump version in Cargo.toml before releasing" + pr_number: 1048 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 23 + deletions_count: 0 + }, { + sha: "875de183748ba7939f53d1c712f1ea1aff7017a8" + date: "2019-10-18 22:15:06 +0000" + description: "Update leveldb-sys up to 2.0.5" + pr_number: 1055 + scopes: ["platforms"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "b2a3c25bbf9e33a9d167eef1ca28d606f405b670" + date: "2019-10-21 14:19:44 +0000" + description: "Apply some fixes for clippy lints" + pr_number: 1034 + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 36 + insertions_count: 188 + deletions_count: 204 + }, { + sha: "616d14abf59ac6e29c356fbf43e108dd7a438d35" + date: "2019-10-21 16:54:41 +0000" + description: "Require `encoding` option for console and file sinks" + pr_number: 1033 + scopes: ["config"] + type: "fix" + breaking_change: true + author: "Luke Steensen" + files_count: 17 + insertions_count: 116 + deletions_count: 61 + }, { + sha: "b6a8778949d9fbb36637bec13bf9a9b03762663b" + date: "2019-10-23 06:22:55 +0000" + description: "Bundle install should print output on error" + pr_number: 1068 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Yeonghoon Park" + files_count: 1 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "f90f50abec9f5848b12c216e2962ad45f1a87652" + date: "2019-10-22 16:32:08 +0000" + description: "Add support for systemd socket activation" + pr_number: 1045 + scopes: ["networking"] + type: "enhancement" + breaking_change: 
false + author: "Bruce Guenter" + files_count: 23 + insertions_count: 199 + deletions_count: 70 + }, { + sha: "afc1edab8b726291850674d6fbbf7c66af2ba6aa" + date: "2019-10-23 15:08:45 +0000" + description: "Add OpenSSL and pkg-config to development requirements" + pr_number: 1066 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 9 + deletions_count: 2 + }, { + sha: "e56f9503f09a7f97d96093775856a019d738d402" + date: "2019-10-23 18:27:01 +0000" + description: "Set default `drop_field` to true" + pr_number: null + scopes: ["regex_parser transform"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 13 + deletions_count: 1 + }, { + sha: "018db5f4c65662367cc749f3e4458271a2003e75" + date: "2019-10-24 09:02:53 +0000" + description: "Add `validate` sub command" + pr_number: 1064 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 269 + deletions_count: 83 + }, { + sha: "c112c4ac7f45e69fea312e7691566a3f9e8e3066" + date: "2019-10-24 12:11:00 +0000" + description: "Metrics buffer & aggregation" + pr_number: 930 + scopes: ["metric data model"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 7 + insertions_count: 875 + deletions_count: 136 + }, { + sha: "137e9ea7495eabca272207a904b9dd4c2f82d6af" + date: "2019-10-24 14:57:57 +0000" + description: "Use rdkafka crate from the upstream Git repository" + pr_number: 1063 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 118 + deletions_count: 127 + }, { + sha: "4cde6dc5021d06e07393af135d0625178385802a" + date: "2019-10-24 15:13:08 +0000" + description: "Check config examples" + pr_number: 1082 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 37 + insertions_count: 18 + deletions_count: 635 + }, { + sha: "ef5ec5732fd4f677f0b25e3f6e470c37d0f73855" + date: "2019-10-24 11:52:44 +0000" + description: "Fix a couple minor issues with checkpointing" + pr_number: 1086 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 17 + deletions_count: 12 + }, { + sha: "8fef7056a1d1c515014e721a2940d04ff269a704" + date: "2019-10-24 13:17:07 +0000" + description: "Fix merge problem in Cargo.lock" + pr_number: 1087 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "c97173fb472ffeb11902e3385dc212fdef8a0ffa" + date: "2019-10-25 09:40:42 +0000" + description: "Use metric buffer in Datadog sink" + pr_number: 1080 + scopes: ["datadog_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 17 + deletions_count: 17 + }, { + sha: "25813de321b097677e7c23069082b8e3597928e8" + date: "2019-10-28 14:20:14 +0000" + description: "Update `ctor` dependency" + pr_number: 1095 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "ccae97b37b04b590ddf64284fd593afdfb024b22" + date: "2019-10-28 14:50:20 +0000" + description: "Avoid dependency on platform-specific machine word size" + pr_number: 1096 + scopes: ["operations"] + type: "chore" + breaking_change: false + 
author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "7ca6dc31a3af3e6e08ef89a469923fa385e5df30" + date: "2019-10-28 15:02:09 +0000" + description: "Rework option to limit records to current boot in journald source" + pr_number: 1105 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 36 + deletions_count: 13 + }, { + sha: "f9a6776a4467cd8a5c4ffdaa44a8a5593f6471ac" + date: "2019-10-28 18:34:13 +0000" + description: "Wrap provider call with a tokio runtime" + pr_number: 1104 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 11 + deletions_count: 7 + }, { + sha: "603f1e3331e44c2b486cb8f5570109987b0a261e" + date: "2019-10-29 17:26:32 +0000" + description: "Update Rusoto to 0.38.0" + pr_number: 1112 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "David O'Rourke" + files_count: 8 + insertions_count: 80 + deletions_count: 77 + }, { + sha: "9e2f98e780fdca4380f701508eb6f35e924d8d8b" + date: "2019-10-29 20:30:57 +0000" + description: "Increase sleep interval in the tests for file source" + pr_number: 1113 + scopes: ["file source"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "539f7086459692fe8b52493cdf053220af687d92" + date: "2019-10-29 18:01:52 +0000" + description: "Update Rusoto to 0.41.x" + pr_number: 1114 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "David O'Rourke" + files_count: 5 + insertions_count: 79 + deletions_count: 116 + }, { + sha: "ddffd3b91588da87b3c3a1623ac1f7be842f2392" + date: "2019-10-29 14:42:21 +0000" + description: "Cursor/checkpoint fixes" + pr_number: 1106 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 77 + deletions_count: 12 + }, { + sha: "a5d442c9d311fb100d1912d5a0c422a847dbbdc3" + date: "2019-10-30 20:12:56 +0000" + description: "Use `rlua` crate from a fork with Pairs implementation" + pr_number: 1119 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 16 + deletions_count: 6 + }, { + sha: "219b9259bad71e36a7e1863c8add85a902bc057f" + date: "2019-10-30 18:13:29 +0000" + description: "Allow iteration over fields" + pr_number: 1111 + scopes: ["lua transform"] + type: "enhancement" + breaking_change: false + author: "Steven Aerts" + files_count: 2 + insertions_count: 61 + deletions_count: 0 + }, { + sha: "ec2c9970ed16c3b06f5dc328b7edd6460db4f310" + date: "2019-10-30 20:48:54 +0000" + description: "Move example of iterating over all fields out of the autogenerated file" + pr_number: 1120 + scopes: ["lua transform"] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 26 + deletions_count: 13 + }, { + sha: "608e21abe8198a90b1100868b46550d63ab95c8c" + date: "2019-10-30 14:16:04 +0000" + description: "Flatten out region configuration in elasticsearch sink" + pr_number: 1116 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "55766802be0a6c35eb6e1f8d35be9081401b27de" + date: "2019-10-31 12:07:34 +0000" + description: "Improve topology tracing spans" + pr_number: 1123 + scopes: 
["observability"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 47 + insertions_count: 276 + deletions_count: 22 + }, { + sha: "641bc4242c7e86cde031a51e4228edb0a66bec27" + date: "2019-10-31 20:03:31 +0000" + description: "Update grok to version 1.0.1" + pr_number: 1124 + scopes: ["grok_parser transform"] + type: "enhancement" + breaking_change: false + author: "Michael Nitschinger" + files_count: 2 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "67ee5cc3055da22e5f9eb4861f8be383c2f72f1c" + date: "2019-10-31 14:56:23 +0000" + description: "Limit journald records to the current boot" + pr_number: 1122 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 34 + deletions_count: 11 + }, { + sha: "70482ab33c44226f392877461cb8be833f8bbdd6" + date: "2019-11-01 08:44:37 +0000" + description: "Abstracts runtime into runtime.rs" + pr_number: 1098 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Michael-J-Ward" + files_count: 23 + insertions_count: 170 + deletions_count: 98 + }, { + sha: "e13b2131dbe297be8ce53f627affe52a9a26ca5d" + date: "2019-11-04 14:29:31 +0000" + description: "Add Cargo.toml version check to CI" + pr_number: 1102 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 84 + deletions_count: 10 + }, { + sha: "933fd510ba4e8ae7a6184515371d7a3c0d97dc75" + date: "2019-11-04 15:23:32 +0000" + description: "Handle edge cases in the Cargo.toml version check" + pr_number: 1138 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "6f236505b5808e0da01cd08df20334ced2f48edf" + date: "2019-11-04 15:29:42 +0000" + description: "Bump version in Cargo.toml to 0.6.0" + pr_number: 1139 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "3b3c824e98c8ae120f32ffb3603077792c165141" + date: "2019-11-04 10:13:29 +0000" + description: "Automatically create missing directories" + pr_number: 1094 + scopes: ["file sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 6 + insertions_count: 121 + deletions_count: 28 + }, { + sha: "d9550711ebcc3bd1033b4985efb3af469e8a4384" + date: "2019-11-04 11:35:33 +0000" + description: "Update lock file for 0.6" + pr_number: 1140 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "a0a5bee914ea94353d545e2d772978ba7963b20f" + date: "2019-11-04 23:33:29 +0000" + description: "Show Git version and target triple in `vector --version` output" + pr_number: 1044 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 40 + deletions_count: 17 + }, { + sha: "8be060fc48eb504c30f874fead15f144570cbeb3" + date: "2019-11-04 15:51:53 +0000" + description: "Update lock file" + pr_number: 1133 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 10 + insertions_count: 880 + deletions_count: 1380 + }, { + sha: "96bd716fc1c022831eb04afd633ede3efe809d28" + date: "2019-11-05 09:15:57 +0000" + description: "Flush and reset any current filter before applying new filter" + pr_number: 1135 + 
scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "David Howell" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c7ad707ed296a93e3d82bff2b3d7793178d50bcc" + date: "2019-11-06 09:10:51 +0000" + description: "Ensure internal rate limiting is logged" + pr_number: 1151 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "eb0566313849002fa820d57cc15d8a9ec957b9d3" + date: "2019-11-06 22:17:55 +0000" + description: "Use inventory for plugins" + pr_number: 1115 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 42 + insertions_count: 280 + deletions_count: 40 + }, { + sha: "fefe9ef4c8f1f20513bc31545d36ab00ed09c4a7" + date: "2019-11-07 10:22:10 +0000" + description: "Fix metrics batch strategy in sinks" + pr_number: 1141 + scopes: ["aws_cloudwatch_metrics sink"] + type: "fix" + breaking_change: false + author: "Alexey Suslov" + files_count: 6 + insertions_count: 7 + deletions_count: 17 + }, { + sha: "f4adfd716034141f367e93bebf283d703c09dfaa" + date: "2019-11-08 14:30:47 +0000" + description: "Refactor the batching configuration" + pr_number: 1154 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 12 + insertions_count: 132 + deletions_count: 130 + }, { + sha: "cfab2339b9b3f8117d816015d6523976b38190cc" + date: "2019-11-08 20:35:06 +0000" + description: "Add `list` subcommand" + pr_number: 1156 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 98 + deletions_count: 1 + }, { + sha: "200dccccc58cf5f7fec86b3124ed00e9ad0d5366" + date: "2019-11-08 15:58:14 +0000" + description: "Stop accidentally requiring region for ES" + pr_number: 1161 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 24 + deletions_count: 6 + }, { + sha: "4b831475ed4cb6a016b18b4fa4f2457f0591ce21" + date: "2019-11-09 18:36:10 +0000" + description: "Bump loofah from 2.2.3 to 2.3.1 in /scripts" + pr_number: 1163 + scopes: ["operatons"] + type: "chore" + breaking_change: false + author: "dependabot[bot]" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "32cfe37c87a01ae08b61627d31be73ecf840d375" + date: "2019-11-11 17:30:27 +0000" + description: "Use vendored OpenSSL" + pr_number: 1170 + scopes: ["platforms"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 20 + deletions_count: 17 + }, { + sha: "fb9c17a26959e8276770a86307807721cd2ded25" + date: "2019-11-11 09:37:36 +0000" + description: "upgrade to rust 1.39.0" + pr_number: 1159 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8de50f4603b3e7626af27b24d9a350eaadb9b4e7" + date: "2019-11-11 20:34:23 +0000" + description: "Add `clean` target to Makefile" + pr_number: 1171 + scopes: ["operations"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "f9d3111015352910e71dab210c376b09cdd26333" + date: "2019-11-12 00:09:45 +0000" + description: "Fixes a bug droping parsed field" + pr_number: 1167 + scopes: ["json_parser transform"] + type: "fix" + breaking_change: 
false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 24 + deletions_count: 4 + }, { + sha: "a272f633464ce06ab28e5d9a7c1e7d6b595c61ec" + date: "2019-11-13 13:16:25 +0000" + description: "`host` is not required when provider is AWS" + pr_number: 1164 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 112 + deletions_count: 60 + }, { + sha: "2100100b5cda0f57292a17bbf4473ed543811f39" + date: "2019-11-13 15:34:38 +0000" + description: " Limit the number of CircleCI build jobs to 8" + pr_number: 1176 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "2e2af43786ff0dbc292f98cedc830791d1e20937" + date: "2019-11-13 15:34:59 +0000" + description: "Fix missed `cargo fmt` run on elasticsearch sink" + pr_number: 1175 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 3 + deletions_count: 1 + }, { + sha: "cfb66e5b90007d9a5dc461afa80e6d3e190febcf" + date: "2019-11-13 17:21:05 +0000" + description: "Don't drop parsed field" + pr_number: 1172 + scopes: ["grok_parser transform"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 20 + deletions_count: 1 + }, { + sha: "e0433fd1ada425c1f5c9505426fa362aae14249e" + date: "2019-11-13 21:23:21 +0000" + description: "Add support for target field configuration" + pr_number: 1165 + scopes: ["json_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 152 + deletions_count: 3 + }, { + sha: "e503057ff3616569521a208abbbed8c3e8fbc848" + date: "2019-11-14 10:49:59 +0000" + description: "Add `generate` subcommand" + pr_number: 1168 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 6 + insertions_count: 272 + deletions_count: 6 + }, { + sha: "de0a6734710a6c63c969048a06d3b55ae1637c87" + date: "2019-11-14 21:24:43 +0000" + description: "Use `strptime` instead of `strftime` in docs where appropriate" + pr_number: 1183 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 13 + insertions_count: 28 + deletions_count: 28 + }, { + sha: "fc2c1db5824f8499190efa078c993f3f52737043" + date: "2019-11-14 20:23:38 +0000" + description: "Support default environment variable values" + pr_number: 1185 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 1 + insertions_count: 11 + deletions_count: 4 + }, { + sha: "20ba2575f40944b36c7bbd9e4d821452626f288b" + date: "2019-11-14 23:49:51 +0000" + description: "Update rdkafka to fix rdkafka/cmake feature" + pr_number: 1186 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "84d830b57de1798b2aac61279f7a0ae99f854241" + date: "2019-11-14 23:50:35 +0000" + description: "Use leveldb from fork with improved portability" + pr_number: 1184 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "3ce0b4ed645d2844f1f6c5308409e2e9466c0799" + date: "2019-11-14 23:50:59 +0000" + description: "Increase wait timeouts in tests which otherwise fail on slow CPUs" + pr_number: 1181 + scopes: 
["testing"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "0884f5d90ca2162aaa0ea6b9ab5d2e10a026a286" + date: "2019-11-19 17:35:50 +0000" + description: "Control which version of leveldb-sys to use with features" + pr_number: 1191 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "90388ed57afea24d569b2317d97df7035211b252" + date: "2019-11-19 17:39:05 +0000" + description: "Support `armv7-unknown-linux` (Raspberry Pi, etc) platforms" + pr_number: 1054 + scopes: ["new platform"] + type: "feat" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 366 + deletions_count: 0 + }, { + sha: "d58139caf6cdb15b4622360d7c9a04a8c86724d6" + date: "2019-11-19 17:41:09 +0000" + description: "Support `aarch64-unknown-linux` (ARM64, Raspberry Pi, etc) platforms" + pr_number: 1193 + scopes: ["new platform"] + type: "feat" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 347 + deletions_count: 10 + }, { + sha: "1b833eb6d693d4c281aa51c332202eb2796ba4db" + date: "2019-11-19 15:24:03 +0000" + description: "Re-fix journald cursor handling and libsystemd name" + pr_number: 1202 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 34 + deletions_count: 37 + }, { + sha: "2d2fadb2599d99ded3d73286fe17a67d20d23805" + date: "2019-11-19 16:51:07 +0000" + description: "New website and documentation" + pr_number: 1207 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 496 + insertions_count: 39821 + deletions_count: 23643 + }, { + sha: "2d419d57d5ab6072bc1058126bc3be50fa57c835" + date: "2019-11-20 00:27:10 +0000" + description: "Initial `ansi_stripper` transform implementation" + pr_number: 1188 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 158 + deletions_count: 0 + }, { + sha: "55b68910ee3d80fdf302faf5a5bc9aa1f68e8dce" + date: "2019-11-20 14:37:14 +0000" + description: "Fix README banner" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 146 + deletions_count: 2 + }, { + sha: "458f6cc0e3fbc6fded1fdf8d47dedb2d0be3bb2d" + date: "2019-11-21 08:36:02 +0000" + description: "Initial `geoip` transform implementation" + pr_number: 1015 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Amit Saha" + files_count: 6 + insertions_count: 286 + deletions_count: 0 + }, { + sha: "803c7f98349a4d07bfc68bc7f10a80c165698f1a" + date: "2019-11-20 21:31:34 +0000" + description: "Small website and documentation improvements" + pr_number: 1215 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 28 + insertions_count: 880 + deletions_count: 307 + }, { + sha: "fb6a1dc7d41a73869b36d20863f410a3f3d9a844" + date: "2019-11-21 00:14:23 +0000" + description: "Small changes to website homepage styles" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "42ca451408b42db43ea2597509e0ce85b44059a9" + date: "2019-11-21 15:28:49 +0000" + description: "Fix some URLs" + pr_number: null + scopes: [] + type: "docs" + 
breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 7 + deletions_count: 11 + }, { + sha: "9fe1eeb4786b27843673c05ff012f6b5cf5c3e45" + date: "2019-11-21 15:39:33 +0000" + description: "Allow >1 config targets for validate command" + pr_number: 1218 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 82 + deletions_count: 91 + }, { + sha: "3af177516728cc4a78a198f69d1cb6b0f0b093fc" + date: "2019-11-21 23:53:20 +0000" + description: "Fix components link in README" + pr_number: 1222 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "58246b306f0e927cfc2ffcfb6f023c146846db0e" + date: "2019-11-21 16:13:16 +0000" + description: "Rename components section to reference in docs" + pr_number: 1223 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 134 + insertions_count: 740 + deletions_count: 4232 + }, { + sha: "89c50b177689cbacf4dc3f930ebbe2b264046b8a" + date: "2019-11-21 16:30:11 +0000" + description: "Styling fixes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 13 + deletions_count: 4 + }, { + sha: "5b38129d0de1185235e630a571e31c3e9f5ab85c" + date: "2019-11-22 00:49:04 +0000" + description: "Fix restoring of `rust-toolchain` file" + pr_number: 1224 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 5 + deletions_count: 3 + }, { + sha: "5f39c2f3515d958d40c9a6187c59806c4731c91c" + date: "2019-11-22 01:25:18 +0000" + description: "Produce archives for `armv7-unknown-linux-musleabihf`" + pr_number: 1225 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 29 + deletions_count: 1 + }, { + sha: "a1410f69382bd8036a7046a156c64f56e8f9ef33" + date: "2019-11-22 02:01:41 +0000" + description: "Support `x86_64-pc-windows-msvc` (Windows 7+) platform" + pr_number: 1205 + scopes: ["new platform"] + type: "feat" + breaking_change: false + author: "Alexander Rodin" + files_count: 15 + insertions_count: 337 + deletions_count: 72 + }, { + sha: "bf9402b2151d976edd42b35d08c1722de7ec2b9b" + date: "2019-11-21 23:06:41 +0000" + description: "Update downloads links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 11 + insertions_count: 144 + deletions_count: 53 + }, { + sha: "5062b39a82949c86fdc80658085a88b78a24a27c" + date: "2019-11-22 12:58:49 +0000" + description: "Fix `check-generate` check in CI" + pr_number: 1226 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 8 + insertions_count: 398 + deletions_count: 374 + }, { + sha: "befb29916c2d19827303109769ca824fbd167870" + date: "2019-11-22 14:15:54 +0000" + description: "Use bash from Docker containers as a shell in Circle CI" + pr_number: 1227 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 13 + deletions_count: 5 + }, { + sha: "6052cbc9a00eac0b2db96651730bd730c39ca83e" + date: "2019-11-22 14:51:24 +0000" + description: "Fix invocation of check jobs" + pr_number: 1229 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + 
insertions_count: 12 + deletions_count: 12 + }, { + sha: "d7a0fd1362f7b99a3bac344434d2a50305f1fa2e" + date: "2019-11-22 16:04:48 +0000" + description: "Verify `zip` archives for `x86_64-pc-windows-msvc` in `wine`" + pr_number: 1228 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 17 + deletions_count: 10 + }, { + sha: "7906dcae3c0a43c99880f2cea9aeb01de629157c" + date: "2019-11-22 10:25:57 +0000" + description: "Update to docusaurus alpha.36" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 82 + deletions_count: 90 + }, { + sha: "49a861ab3045570f1e173c56fa23291e014856a2" + date: "2019-11-22 11:22:16 +0000" + description: "Fix curl commands mentioned in #1234" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "39bd126fe67b048003532c178c64be90ef4cec62" + date: "2019-11-22 16:49:49 +0000" + description: "Run nightly builds at 5pm UTC" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "fe32fdc5d222182f18e4118af28d72d4b06dca0d" + date: "2019-11-22 13:19:53 +0000" + description: "Redraw diagram to fix an initial load issue in Chrome" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 6 + }, { + sha: "6de3e4f3a725c978ccaa95c5a9180df202c5a074" + date: "2019-11-22 15:45:12 +0000" + description: "Rerender diagram to fix Chrome update issue" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 10 + deletions_count: 7 + }, { + sha: "12d36bbe2eb223ab89335b61dfbb7e18c4649981" + date: "2019-11-22 16:43:22 +0000" + description: "More Chrome fixes" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "f9396da79b49f617ce93d6be233f9592831fab2d" + date: "2019-11-22 17:00:30 +0000" + description: "Fix Chrome sorting issue" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "662c5d1346ea2b01c0bc3c11c648cbdf92035fe2" + date: "2019-11-22 19:32:52 +0000" + description: "Fix readme" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 47 + deletions_count: 182 + }, { + sha: "cb6571798af5b80c123905b4cac3a56a67fc3181" + date: "2019-11-22 19:36:11 +0000" + description: "Fix readme component counts" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 11 + deletions_count: 11 + }, { + sha: "1f401a68bdb5c0bcfc9d0385f49a70f22fbce5d9" + date: "2019-11-23 11:50:14 +0000" + description: "Make `openssl/vendored` feature optional" + pr_number: 1239 + scopes: ["platforms"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "52a49d5a32f091eec7c174b02803f7fc3ca5af34" + date: "2019-11-23 04:21:20 +0000" + description: "Accept 
metric events, too" + pr_number: 1237 + scopes: ["blackhole sink"] + type: "enhancement" + breaking_change: false + author: "Austin Seipp" + files_count: 1 + insertions_count: 8 + deletions_count: 6 + }, { + sha: "457f964bde42fce3b92e5bd1a65ef6192c404a16" + date: "2019-11-23 13:27:51 +0000" + description: "Update `openssl` dependency" + pr_number: 1240 + scopes: ["platforms"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 14 + deletions_count: 14 + }, { + sha: "cdee561f8c1a023b77c5db712cc081b90570eb55" + date: "2019-11-23 15:49:09 +0000" + description: "Don't put *.erb files to configs directory" + pr_number: 1241 + scopes: ["platforms"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "b449b2b67f077760215294c418688c27f3f629a0" + date: "2019-11-23 22:51:25 +0000" + description: "Document installation on Windows" + pr_number: 1235 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 37 + insertions_count: 1064 + deletions_count: 351 + }, { + sha: "663be72997339cb9c30f935d9ef4c8e7732bc56c" + date: "2019-11-23 15:01:47 +0000" + description: "Add docker to homepage" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 151 + deletions_count: 1 + }, { + sha: "732265e9be0ae4c5add4679ef11fe808032c8f78" + date: "2019-11-23 15:13:26 +0000" + description: "Update docker image" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "5c15a3c6c7811315ff980e57f685d7fd3616ca7e" + date: "2019-11-23 15:40:52 +0000" + description: "Fix administrating doc" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 31 + deletions_count: 1 + }, { + sha: "068ae60a963523e540f2f404545e287a8b161037" + date: "2019-11-23 15:41:36 +0000" + description: "Add administration to docs sidebar" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "cdcd624da93fd36676e84426b8ec93917a90c8e1" + date: "2019-11-23 20:46:47 +0000" + description: "Add C++ toolchain installation step" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "d7b7735ae57e362e8255a59a578ac12f4b438119" + date: "2019-11-24 01:14:17 +0000" + description: "Attempt to fix website theme flickering" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 25 + deletions_count: 20 + }, { + sha: "1ec95b9df9a1f0456c02dcfd9824024ed7516fcc" + date: "2019-11-24 10:26:30 +0000" + description: "Describe build features" + pr_number: 1243 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 82 + deletions_count: 25 + }, { + sha: "37e60137b4fab70dc97cc177ecd6f1c81b1c86b0" + date: "2019-11-24 12:03:02 +0000" + description: "Add ARMv7 to installation docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 84 + deletions_count: 3 + }, { + sha: 
"8698eb922c5e1a1a0906fe25e2e9f2a39acb9c06" + date: "2019-11-24 12:24:10 +0000" + description: "Various installation docs corrections, closes #1234" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 8 + insertions_count: 27 + deletions_count: 15 + }, { + sha: "818c28228965d9d0b691e18298127eb5666d7865" + date: "2019-11-24 12:26:07 +0000" + description: "Remove Alogia search until it has indexed everything" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "b00996fc6949d6d34fcd13f685b5b91d116f4e8c" + date: "2019-11-24 21:56:40 +0000" + description: "Fix passing environment variables inside the CI Docker containers" + pr_number: 1233 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 16 + deletions_count: 7 + }, { + sha: "604b40d15bcbfb62eae0ca314ffad06a365ccc85" + date: "2019-11-24 15:06:09 +0000" + description: "Add operating system as a compenent attribute and filter" + pr_number: 1244 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 59 + insertions_count: 619 + deletions_count: 141 + }, { + sha: "dde45458aa375d5c9e1eb7beb4bf9fe102ccb0db" + date: "2019-11-24 15:56:01 +0000" + description: "Fix operating system filter" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "0cad20f837f1f682f9a5b976e150417484e4839f" + date: "2019-11-24 16:02:39 +0000" + description: "Dont show operating systems for transforms" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 16 + insertions_count: 33 + deletions_count: 33 + }, { + sha: "cad2349778d5d42e71ed12c7cf974e6f9ef731d5" + date: "2019-11-24 17:14:28 +0000" + description: "Fix broken link on homepage" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9bdaf14ee089da0ab6dff3b464a3086fc709cec6" + date: "2019-11-24 21:43:05 +0000" + description: "Add sidebar background and ga id" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "545ea5b0c1f88fc8ee42c9bce13358155bbf34fe" + date: "2019-11-25 11:12:50 +0000" + description: "Fix link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "f9c486ce4abcd77cf61ddc7fe2fadb4aeae3b806" + date: "2019-11-25 15:25:18 +0000" + description: "Fix name of `shiplift/unix-socket` feature" + pr_number: 1251 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "0e26cfd64a421b3b8296697e5dfca8d8ab35df6c" + date: "2019-11-25 00:08:26 +0000" + description: "Update dependencies" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 644 + deletions_count: 641 + }, { + sha: "9863f819c001827c400803b9fc0b1b71ea862244" + date: "2019-11-25 00:15:02 +0000" + description: "Fix Github 
issues links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 13 + deletions_count: 13 + }, { + sha: "42eabf66dc5138f43c7310b067064beaf3f8c29d" + date: "2019-11-25 10:42:39 +0000" + description: "Use the proper font in the configuration digram, ref #1234" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "9358c7a2d51ca259e38e49de5c2a46049146fead" + date: "2019-11-25 11:10:47 +0000" + description: "Enable Algolia search" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "8f18ad80302bf5975ad704271eb2c8d986b1c7d0" + date: "2019-11-25 11:30:11 +0000" + description: "Remove paginator from main doc content so that it is not included in search results" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 11 + deletions_count: 5 + }, { + sha: "d8fef3c66ce2072c003ba30704276e51c5267dc4" + date: "2019-11-25 12:20:05 +0000" + description: "Fix search field styling" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 42 + deletions_count: 9 + }, { + sha: "5492ae39c48d67e22fb983b9e55fa1cf5ee09dae" + date: "2019-11-25 12:25:24 +0000" + description: "Move main links in header to the left" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "357bdbbe9bf142eaf028a46e016e7b37e73a6e88" + date: "2019-11-26 05:38:57 +0000" + description: "Add JSON encoding option" + pr_number: 1174 + scopes: ["http sink"] + type: "enhancement" + breaking_change: false + author: "James Sewell" + files_count: 6 + insertions_count: 102 + deletions_count: 17 + }, { + sha: "969a426e0f9826e5bebf45ffb87fe7b2f785e7e7" + date: "2019-11-25 14:38:10 +0000" + description: "Reference exact latest version instead of \"latest\" in download URLs" + pr_number: 1254 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 153 + deletions_count: 61 + }, { + sha: "b29e4e309b9a13eff12f46cf00e21a76090e46fd" + date: "2019-11-25 14:38:34 +0000" + description: "Fix search bar styling on mobile" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 24 + deletions_count: 11 + }, { + sha: "ea81323033974a347bca458e5ab7e446b24228a3" + date: "2019-11-25 14:52:15 +0000" + description: "Add auto-generated comments to files that are auto-generated, closes #1256" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 114 + insertions_count: 655 + deletions_count: 101 + }, { + sha: "26333d9cf00bb5e44ae73aa17a7cab5583dc7d22" + date: "2019-11-25 14:27:35 +0000" + description: "Sleep to avoid split reads" + pr_number: 1236 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 16 + deletions_count: 6 + }, { + sha: "9b7fdca9f9f0d5818afbd821210f9f2c17ccc564" + date: "2019-11-25 15:49:57 +0000" + description: "Add CODEOWNERS file" + pr_number: 1248 + scopes: 
["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 71 + deletions_count: 0 + }, { + sha: "a9fbcb3ddbb3303f981257be064a995db59b7dbb" + date: "2019-11-25 21:56:15 +0000" + description: "Add `test` sub-command" + pr_number: 1220 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 38 + insertions_count: 2446 + deletions_count: 79 + }, { + sha: "3e92c1eac7a44b0661f25b452a112e5024edf7b3" + date: "2019-11-25 22:43:40 +0000" + description: "Re-generate unit test spec" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "a282db6df013b89d84694e68ecde38c4d544c1ba" + date: "2019-11-25 19:44:24 +0000" + description: "Add hash links to all headings" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 20 + deletions_count: 8 + }, { + sha: "aed6f1bf1cb0d3d10b360e16bd118665a49c4ea5" + date: "2019-11-26 12:24:33 +0000" + description: "Reorganise metric model" + pr_number: 1217 + scopes: ["metric data model"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 16 + insertions_count: 1389 + deletions_count: 1036 + }, { + sha: "df3e70980bfc9f6cde60516df482949fd0bc592b" + date: "2019-11-26 15:24:00 +0000" + description: "Turn \"executable\" bit off for some of docs files" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 21 + insertions_count: 0 + deletions_count: 0 + }, { + sha: "f20fc4ad3ea88d112d84be58eb51b4a5e85df21f" + date: "2019-11-26 16:35:36 +0000" + description: "Enrich events with metadata" + pr_number: 1149 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 39 + insertions_count: 505 + deletions_count: 298 + }, { + sha: "f5cfdfe2fb25703ea308992c3d106b5c4b3b7af1" + date: "2019-11-26 11:01:48 +0000" + description: "Testing documentation touchups" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 718 + deletions_count: 2 + }, { + sha: "c86b23818345136ea0bf911d92426440387b1620" + date: "2019-11-26 11:17:52 +0000" + description: "Fix examples syntax and parsing" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 19 + insertions_count: 198 + deletions_count: 177 + }, { + sha: "8fae3d0a5524f0172a97a1235c13305f660bc07f" + date: "2019-11-26 11:34:17 +0000" + description: "Clarify guarantees language to be feature specific not component specific" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 12 + deletions_count: 12 + }, { + sha: "57434aa05893d89300cee34f7aa2be7c6be7405b" + date: "2019-11-26 11:46:58 +0000" + description: "Fix docker source config examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 8 + deletions_count: 9 + }, { + sha: "18da561ba25843b13ce013f5a2052dfbff877b2b" + date: "2019-11-26 19:15:14 +0000" + description: "Fix sorting in make generate" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 35 + 
deletions_count: 43 + }, { + sha: "3db6403a24c16a36ba3367dedff006c9c9924626" + date: "2019-11-27 12:23:58 +0000" + description: "Add timeouts to crash tests" + pr_number: 1265 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 14 + deletions_count: 2 + }, { + sha: "df2b5d8016f27e868e0bb2a6feaf8bd99caaf64f" + date: "2019-11-27 17:03:31 +0000" + description: "Run `x86_64-pc-windows-msvc` tests in release mode" + pr_number: 1269 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c9f96ffaef533272103a167a5900edad1ed5946c" + date: "2019-11-27 10:22:21 +0000" + description: "Move env vars to reference section" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 11 + insertions_count: 204 + deletions_count: 41 + }, { + sha: "77e582b526680a22ea4da616cbfdb3b0ad281097" + date: "2019-11-27 19:19:04 +0000" + description: "Custom DNS resolution" + pr_number: 1118 + scopes: ["networking"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 11 + insertions_count: 733 + deletions_count: 3 + }, { + sha: "b255a52a6b53bcc1a9361ae746dde2c5d5fb9132" + date: "2019-11-27 13:44:27 +0000" + description: "Add env_vars key to all components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 109 + insertions_count: 3752 + deletions_count: 1697 + }, { + sha: "8fac7296e4c17969c08841a58ce7b64f2ede5331" + date: "2019-11-27 18:41:49 +0000" + description: "Fix rate_limit and retry option names" + pr_number: 1270 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 20 + insertions_count: 625 + deletions_count: 616 + }, { + sha: "0a06803a89aa3ca570edf72834abac52db94a0b8" + date: "2019-11-27 18:51:15 +0000" + description: "Fix variable field names" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 25 + insertions_count: 79 + deletions_count: 832 + }, { + sha: "e50767b1560288cb862bf9f933a4cc92e7b329a6" + date: "2019-11-27 19:10:37 +0000" + description: "Fix variable field names" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 26 + insertions_count: 95 + deletions_count: 72 + }, { + sha: "79f28aa15f26d73175467fb621ed87bf34240991" + date: "2019-11-27 19:38:05 +0000" + description: "Fix config examples category name" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 46 + insertions_count: 894 + deletions_count: 2210 + }, { + sha: "ae90038afb5d89eb080bd7c760ce3a4f1c67f219" + date: "2019-11-27 19:52:42 +0000" + description: "Fix example categories" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 24 + insertions_count: 53 + deletions_count: 70 + }, { + sha: "b3554b16fa333727e21c8eaae87df4533e217c96" + date: "2019-11-28 10:32:52 +0000" + description: "Build .deb packages for all musl targets" + pr_number: 1247 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 17 + insertions_count: 500 + deletions_count: 274 + }, { + sha: "10de21ba24814324547d53553ed098742279f935" + date: "2019-11-28 15:43:22 +0000" + description: "Typo" + pr_number: 1273 + scopes: [] + type: 
"docs" + breaking_change: false + author: "Dan Palmer" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9c531ca1e734234e187d82b76912bf5dfa188742" + date: "2019-11-28 10:51:03 +0000" + description: "Remove console.log" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "c815d27773da3acd0272ef009270f772a3103791" + date: "2019-11-29 15:29:25 +0000" + description: "Add a unit test guide" + pr_number: 1278 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 6 + insertions_count: 253 + deletions_count: 0 + }, { + sha: "7b5a7f322bffdbd7638791e32effa848deb1fdea" + date: "2019-11-29 12:01:14 +0000" + description: "Add topology section" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 90 + deletions_count: 23 + }, { + sha: "ecdb56f5f49920353e5696e936f2d711d6881bbd" + date: "2019-11-29 13:59:31 +0000" + description: "Default to centralized topology" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "1dc6e303079bf6a9bb9802fe108e77edf0b0fd83" + date: "2019-11-29 14:21:42 +0000" + description: "Fix rounded tabs" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 33 + deletions_count: 13 + }, { + sha: "4e98b8321cd334d780a5388bd848d83cb677003c" + date: "2019-11-30 00:15:17 +0000" + description: "Increase CI output timeout" + pr_number: 1272 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 8 + deletions_count: 1 + }, { + sha: "7dd271e9102d2a2eb2016f8d735c8d9710966210" + date: "2019-11-30 00:37:24 +0000" + description: "Delete unused OpenSSL patch" + pr_number: 1282 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 24 + }, { + sha: "32e5bfc2ff07ce0dddf817d5b64a2b04cc40f9ab" + date: "2019-11-29 22:11:41 +0000" + description: "Run nightly builds at 12am UTC" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "62992492de9c21e8a59464696b2ba226c50b82f0" + date: "2019-11-30 01:14:25 +0000" + description: "Set up redirects for x86_64-unknown-linux-gnu archives" + pr_number: 1284 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 23 + deletions_count: 5 + }, { + sha: "5fa10916882cd07ee6c6726be10227b321f5880c" + date: "2019-11-30 01:42:23 +0000" + description: "Build multi-arch Docker images" + pr_number: 1279 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 9 + insertions_count: 151 + deletions_count: 122 + }, { + sha: "5c6942f8e52971ec3eb95750d2a79574cb0c12bd" + date: "2019-11-30 02:06:35 +0000" + description: "Use `sidebar_label` as subpage title if possible" + pr_number: 1283 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 17 + deletions_count: 11 + }, { + sha: 
"4058ef356271a8276ddd6b1f41933d25ddd585a6" + date: "2019-11-30 02:06:47 +0000" + description: "Simplify platform names in \"downloads\" section" + pr_number: 1285 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "42c2a1f75e639ff29da5419cff29848fa3163d01" + date: "2019-11-30 10:13:42 +0000" + description: "Run nightly builds at 11am UTC" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "8f271aee3b9873b10a68ab5c747c4e895347acca" + date: "2019-11-30 13:15:43 +0000" + description: "Remove extra `setup_remote_docker` step from `relase-docker`" + pr_number: 1287 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 2 + }, { + sha: "643716654c9049e18c057d9e88de4e78f566d983" + date: "2019-11-30 13:15:56 +0000" + description: "Fix S3 release verification" + pr_number: 1286 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "08a297961a767d798ebb244a10baf05b318272e7" + date: "2019-11-30 18:26:36 +0000" + description: "Upgrade Docker on the step in which it is used" + pr_number: 1288 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 22 + deletions_count: 22 + }, { + sha: "70965d8e6d0c0d850faa86fb674987a107df9b93" + date: "2019-11-30 16:14:02 +0000" + description: "Cleanup installation docs" + pr_number: 1289 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 32 + insertions_count: 783 + deletions_count: 618 + }, { + sha: "469671dc457f867cee8bab247b6529026e7ae4ca" + date: "2019-12-01 11:21:05 +0000" + description: "Update to docaurus 2.0.0-alpha.37" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 242 + deletions_count: 229 + }, { + sha: "06a864b106bc2233c5d5a8ba78f045def8a937f6" + date: "2019-12-01 11:39:36 +0000" + description: "Group downloads by os" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 8 + insertions_count: 62 + deletions_count: 10 + }, { + sha: "8ee38009da9bcd41444e9cf2ed48683aa1870a1a" + date: "2019-12-01 13:15:28 +0000" + description: "Rename raspberry-pi to raspbian" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 10 + insertions_count: 44 + deletions_count: 25 + }, { + sha: "73dc9d55803733c460f42ce38e09b8c7c8344680" + date: "2019-12-01 13:29:57 +0000" + description: "Fix responsive styling on homepage" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 9 + deletions_count: 1 + }, { + sha: "3fc6196a6b6e2df7c76e9d5924377a2054dcb5e2" + date: "2019-12-01 23:44:37 +0000" + description: "Fix accessing custom front-matter in docs" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 13 + deletions_count: 5 + }, { + sha: "a6668b0c1db009b537c989ef95d8c4e616440cb9" + date: "2019-12-02 09:58:25 +0000" + description: 
"Build RPM packages for ARM" + pr_number: 1292 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 220 + deletions_count: 62 + }, { + sha: "62f9db5ba46a0824ed0e979743bc8aaec8e05010" + date: "2019-12-02 08:27:53 +0000" + description: "Refactor the sinks' request_* configuration" + pr_number: 1187 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 12 + insertions_count: 321 + deletions_count: 338 + }, { + sha: "cbac5010444357dae078b299991304ca8055889c" + date: "2019-12-02 19:23:02 +0000" + description: "Fix Raspbian id capitalization" + pr_number: 1295 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "a0eec9935a8a2d0409e23c6cb23cba807b16a7df" + date: "2019-12-02 22:44:56 +0000" + description: "Run `package-rpm*` jobs explicitly" + pr_number: 1298 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "5ae3036f0a0de24aeeb92135621c877428bcfa02" + date: "2019-12-03 11:28:27 +0000" + description: "Fix section links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 9 + insertions_count: 24 + deletions_count: 16 + }, { + sha: "1f52116c3c40dcc439bd8f32c9cdf2a0a3b197d7" + date: "2019-12-02 11:36:52 +0000" + description: "Fix browse downloads link" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "bd865b06bc2ff68edb3a131a574572b88fcc8b87" + date: "2019-12-03 12:29:30 +0000" + description: "Add slugify method to mimic Docusaurus hashing logic for links" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 7 + insertions_count: 23 + deletions_count: 11 + }, { + sha: "9d38c48a10b9d3deb8d35b6e97002cab4a03b885" + date: "2019-12-03 12:33:09 +0000" + description: "Fix buffers and batches hash link" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 10 + insertions_count: 20 + deletions_count: 20 + }, { + sha: "0c0f07265ad4020d68116c14113d917499ca862f" + date: "2019-12-03 13:30:43 +0000" + description: "Use the Rust regex tester, closes #634" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "d962fa60fd1e71cd2c9c02fc4e1ead2fd0a5086c" + date: "2019-12-03 13:35:39 +0000" + description: "Fix example regex" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 16 + deletions_count: 16 + }, { + sha: "17a27b315b4e65f687adb0d64d2b6c5cf8890a95" + date: "2019-12-03 15:55:03 +0000" + description: "Pass `TaskExecutor` to transform" + pr_number: 1144 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 25 + insertions_count: 67 + deletions_count: 35 + }, { + sha: "cf95723d77ba4bd3fa819dd45fa7676bd1a7d19d" + date: "2019-12-03 17:28:50 +0000" + description: "Add community page with mailing list" + pr_number: 1309 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" 
+ files_count: 13 + insertions_count: 271 + deletions_count: 223 + }, { + sha: "c912f16f1cbd924db1e800498dbfb240e9211212" + date: "2019-12-03 17:45:00 +0000" + description: "Responsive styling for community page" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 14 + deletions_count: 2 + }, { + sha: "4c1718431e887c9a9f58392428cde6c2a33e5070" + date: "2019-12-03 18:04:41 +0000" + description: "Fix slide out main nav menu link labels" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 7 + deletions_count: 5 + }, { + sha: "14ebf42842d90f937df7efa88f7acea1bb1859e8" + date: "2019-12-03 18:53:47 +0000" + description: "Re-add components list" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 207 + deletions_count: 14 + }, { + sha: "6e60b2fab0de568ef47c5afdd606a60c3069531d" + date: "2019-12-03 21:44:27 +0000" + description: "Use ${ENV_VAR} syntax in relevant examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 9 + insertions_count: 33 + deletions_count: 29 + }, { + sha: "fcf6356f11ac7d80a5c378aeceabd6cf72168ef1" + date: "2019-12-04 12:21:43 +0000" + description: "Performance optimisations in metric buffer" + pr_number: 1290 + scopes: ["metric data model"] + type: "perf" + breaking_change: false + author: "Alexey Suslov" + files_count: 2 + insertions_count: 165 + deletions_count: 9 + }, { + sha: "690d798e8cc4d08457b5ad3dd3fcee4da7fea4b3" + date: "2019-12-03 23:24:55 +0000" + description: "Fix nav width" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 10 + deletions_count: 5 + }, { + sha: "53d2a9ca0ff85c8d39cf9b312265c859f079c170" + date: "2019-12-04 10:56:01 +0000" + description: "Update README with new links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 8 + deletions_count: 6 + }, { + sha: "00e21e83c54d2ca5e0b50b3b96a3390e761bf2dd" + date: "2019-12-04 13:23:44 +0000" + description: "Add `SinkContext` to `SinkConfig`" + pr_number: 1306 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 23 + insertions_count: 146 + deletions_count: 113 + }, { + sha: "52e4f176f62c305a6d0adcf6fa1f5b08bd2466dc" + date: "2019-12-04 19:18:34 +0000" + description: "Initial `new_relic_logs` sink implementation" + pr_number: 1303 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 13 + insertions_count: 1166 + deletions_count: 9 + }, { + sha: "4c1d8ceaef63fc9f73e5e568773bf569f6c2f460" + date: "2019-12-04 19:48:24 +0000" + description: "Fix NR build signature" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 12 + deletions_count: 11 + }, { + sha: "772672e65920de3c0f13fa5b86c9c428b2d3fbfb" + date: "2019-12-04 15:29:04 +0000" + description: "Add map to ServiceBuilder and s3" + pr_number: 1189 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 346 + deletions_count: 182 + }, { + sha: "59fd318f227524a84a7520bbae004d2c75156365" + date: "2019-12-04 22:34:43 +0000" + 
description: "Rename `datadog` sink to `datadog_metrics`" + pr_number: 1314 + scopes: ["datadog_metrics sink"] + type: "fix" + breaking_change: true + author: "Alexey Suslov" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "e76083548a2d46664acd67a8e40f1835614d94c5" + date: "2019-12-04 15:39:15 +0000" + description: "Sync with new toggle changes" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 166 + }, { + sha: "5822ee199bafbc2558491d5ba9682b8f10ed95d0" + date: "2019-12-05 09:31:01 +0000" + description: "Send aggregated distributions to Datadog" + pr_number: 1263 + scopes: ["datadog_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 231 + deletions_count: 0 + }, { + sha: "e776d3a404810935810983caf888aa86138b448b" + date: "2019-12-05 13:28:26 +0000" + description: "Test & validate subcommands without args target default path" + pr_number: 1313 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 32 + deletions_count: 7 + }, { + sha: "58d6e976cf81f2175e7fd6cc6d4c85c9e2bc88eb" + date: "2019-12-05 17:51:10 +0000" + description: "Fix statsd binding to loopback only" + pr_number: 1316 + scopes: ["statsd sink"] + type: "fix" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "324012b74c8879b1185ace3c5c36d9170222597e" + date: "2019-12-06 14:38:03 +0000" + description: "Fix multiple sources test" + pr_number: 1322 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "dc21766356a422e694287bff1b70fde8a49e74af" + date: "2019-12-06 15:54:01 +0000" + description: "Document `drop_field`" + pr_number: 1323 + scopes: ["json_parser transform"] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 42 + deletions_count: 2 + }, { + sha: "8d15fdd267df44ac9f5079e7b6a5a2bc122b9e1f" + date: "2019-12-07 10:53:05 +0000" + description: "Update to docusaurus 2.0.0-alpha.39" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 198 + deletions_count: 207 + }, { + sha: "2c6982502c75409806da7d74a4cc019f2c60ed08" + date: "2019-12-09 13:11:56 +0000" + description: "Add \"default-{musl,msvc}\" features" + pr_number: 1331 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 7 + insertions_count: 93 + deletions_count: 29 + }, { + sha: "fb7f1f5743e464294c62d11e1be0d26e309f2061" + date: "2019-12-09 11:06:57 +0000" + description: "Fix validating environment title" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8cb9ec9406315d87c10f297da115ced93c2418f1" + date: "2019-12-09 15:35:33 +0000" + description: "Use LLVM-9 from the distribution repository" + pr_number: 1333 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 31 + deletions_count: 87 + }, { + sha: "a68c9781a12cd35f2ee1cd7686320d1bd6e52c05" + date: "2019-12-09 13:26:38 +0000" + description: "Initial `splunk_hec` source 
implementation" + pr_number: null + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 1142 + deletions_count: 44 + }, { + sha: "9c53a5dd65c4711c58a5afede4a23c048c4bed4d" + date: "2019-12-09 17:00:16 +0000" + description: "Use LLVM from an archive instead of Git" + pr_number: 1334 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 33 + deletions_count: 63 + }, { + sha: "86abe53556fd7647717ddfecc21834f87adaa62b" + date: "2019-12-09 10:57:26 +0000" + description: "Update `shiplift 0.6`" + pr_number: 1335 + scopes: ["docker source"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "4b93936dc588438a3023a6d86075ca75a33921f3" + date: "2019-12-09 16:04:27 +0000" + description: "Rewrite getting started guide." + pr_number: 1332 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 76 + deletions_count: 54 + }, { + sha: "5461ff419b9587264bbce823af227e1a3007a578" + date: "2019-12-09 16:05:58 +0000" + description: "Update contribution guide for docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 53 + deletions_count: 18 + }, { + sha: "285b967ab228a94b4a140803cec38b71bb59ad14" + date: "2019-12-09 11:06:50 +0000" + description: "Add missing rate limited log" + pr_number: 1336 + scopes: ["grok_parser transform"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "137c51de9122c32cbbfba983f3068b6df1d6a68e" + date: "2019-12-10 09:34:53 +0000" + description: "Edit getting started guide" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "5c2c0af26554258d746051a5861ce9aaa869a8be" + date: "2019-12-10 16:42:08 +0000" + description: "Fix unit test spec rendering" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 43 + deletions_count: 39 + }, { + sha: "da89fa9fd801ff6f87412fb78d686936115b241c" + date: "2019-12-11 17:09:12 +0000" + description: "Build `msi` package for Vector" + pr_number: 1345 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 23 + insertions_count: 780 + deletions_count: 44 + }, { + sha: "8561d42eba3c5d30d57ab47c6454f19978c5ea4b" + date: "2019-12-11 15:56:33 +0000" + description: "Remove sleeps from topology tests" + pr_number: 1346 + scopes: ["testing"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 1 + deletions_count: 16 + }, { + sha: "8c991293ee2cd478fc639e96e6c27df794a0c5ec" + date: "2019-12-11 16:30:27 +0000" + description: "Detect and read gzipped files" + pr_number: 1344 + scopes: ["file source"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 7 + insertions_count: 127 + deletions_count: 21 + }, { + sha: "cbba6f180a583d4d7f236b64b77fdd6406bc6c63" + date: "2019-12-12 15:49:31 +0000" + description: "Put `etc` directory only to Linux archives" + pr_number: 1352 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + 
insertions_count: 11 + deletions_count: 11 + }, { + sha: "1f9b9cf6eddf27557bcaa6a1e1139da0137dcb4c" + date: "2019-12-12 16:22:49 +0000" + description: "Allow passing features to `make build`" + pr_number: 1356 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "779d727fb49c907d6babbd8ed48e0db2cec14604" + date: "2019-12-12 16:53:31 +0000" + description: "Compress release archives with `gzip -9`" + pr_number: 1294 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "f8ad1b5a0edcf214865e4ba1133b3a0df1465905" + date: "2019-12-12 19:11:22 +0000" + description: "Add notices for OpenSSL to the license for binary distributions" + pr_number: 1351 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 22 + deletions_count: 1 + }] + } + "0.7.1": { + date: "2020-01-24" + codename: "" + whats_next: [] + commits: [{ + sha: "e5096d0ad09333cdcbf7f7b8fdea71764c61b940" + date: "2020-01-22 17:53:08 +0000" + description: "Make sorting of blog posts stable" + pr_number: 1566 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "ed11b854a21f8f8f4d0b532d2b946ed0d3a91718" + date: "2020-01-22 18:00:10 +0000" + description: "Add AWS API key for Windows tests in CI" + pr_number: 1565 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "e7bd9180249751dcef6299f4836b0a82274ec2f9" + date: "2020-01-22 18:00:26 +0000" + description: "Pass `CIRCLE_SHA1` environment variable to `release-github` job" + pr_number: 1567 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "95373bd03fa70d2bcbcfa9c6b02550bcd65d0623" + date: "2020-01-22 11:36:26 +0000" + description: "Fix crash when `in_flight_limit` is set to `1`" + pr_number: 1569 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 31 + deletions_count: 1 + }, { + sha: "678be7404a236bb6f5e596d117f8cadd16e5a690" + date: "2020-01-22 19:30:47 +0000" + description: "Fix error when socket addresses do not use `IPV4` or `IPV6` addresses" + pr_number: 1575 + scopes: ["socket sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 9 + insertions_count: 117 + deletions_count: 118 + }, { + sha: "c7de358cb72d38bc82544ba2c42c01a96be77961" + date: "2020-01-23 13:28:37 +0000" + description: "Fix `aws_kinesis_firehose` sink healthcheck" + pr_number: 1573 + scopes: ["aws_kinesis_firehose sink"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 13 + deletions_count: 22 + }, { + sha: "e5a3113f0ddfbcb08c6ce70dda374abbfdbc867d" + date: "2020-01-23 14:51:32 +0000" + description: "Escape special characters in options descriptions" + pr_number: 1580 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 26 + insertions_count: 53 + deletions_count: 53 + }, { + sha: "e1b6bc834a94066313c2de58e540845476289789" + date: "2020-01-23 19:44:22 +0000" + 
description: "Create `vector` user when installing RPM package" + pr_number: 1583 + scopes: ["rpm platform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 11 + deletions_count: 2 + }, { + sha: "dc3275705489d55e86d10f609fd5caf090b65f5d" + date: "2020-01-23 21:44:23 +0000" + description: "Support bug fixing releases" + pr_number: 1587 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 18 + deletions_count: 5 + }, { + sha: "8287f0535d1ddd5e6fadaf1368623dbe3d7579b0" + date: "2020-01-23 22:26:44 +0000" + description: "Add all generated files to the release commit" + pr_number: 1588 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "18b2d2f0d3b4f6df22883550dd106c2ec8c051d4" + date: "2020-01-23 23:31:55 +0000" + description: "Do not require `systemd` as an RPM dependency" + pr_number: 1590 + scopes: ["rpm platform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 2 + }, { + sha: "d9052b84a872f6562dfd0318a6c6c887c92fda34" + date: "2020-01-23 23:37:55 +0000" + description: "Add `release-push` target to the Makefile" + pr_number: 1589 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 30 + deletions_count: 0 + }] + } + "0.10.0": { + date: "2020-07-23" + codename: "Lamport Clock" + whats_next: [{ + title: "Kubernetes! ⎈" + description: "We're doing this integration right. We've been alpha testing with a number of large clusters and we've honed in on a very high-quality approach that reflects the UX Vector is going after. You can learn more in our [Kubernetes Integration RFC][urls.pr_2222]. Do you want to beta test? [Tell us!][urls.vector_chat]" + }, { + title: "WASM plugins 🔌" + description: "This release included an experimental implementation of a WASM transform behind a `wasm` build flag. We plan to stabilize WASM transforms, as well as add [WASM][urls.wasm] sinks and sources, enabling you to extend Vector in the language of your choice with little performance penalty. ❤️ WASM. Check out the [WASM Plugin RFC][urls.pr_2341] for more info." + }, { + title: "Dynamic HTTP Rate-Limiting (AIMD)" + description: "Fiddling with rate-limits is a frustrating endaevor. If you set them too high you risk overwhelming the service; too low and you're unecessarily limiting throughput. What happens if you deploy a few more Vector instances? Vector is planning to solve this by automatically detecting the optimal rate limits, taking inspiration from TCP congestional control algorithms. Check out the [Dynamic HTTP Rate Limiting RFC][urls.pr_2329]." 
+ }] + commits: [{ + sha: "591114c96e8c6e4d0470c5419f77c6faf2524ade" + date: "2020-04-21 19:50:49 +0000" + description: "Add `sourcetype`" + pr_number: 2318 + scopes: ["splunk_hec sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 45 + deletions_count: 0 + }, { + sha: "020ee000b0b21d9d56fbc2b81b2fbb43b4c780e9" + date: "2020-04-21 16:28:51 +0000" + description: "Remove `proptest` dep" + pr_number: 2379 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 0 + deletions_count: 58 + }, { + sha: "639a21d07fe049735b174bc96cd78fae5b995a23" + date: "2020-04-21 14:49:20 +0000" + description: "New `generator` source" + pr_number: 2286 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 17 + insertions_count: 546 + deletions_count: 6 + }, { + sha: "4c8d5a1921815a9efac9f9d40f97fb9c4a44e437" + date: "2020-04-21 16:57:51 +0000" + description: "Bump cargo.toml version to `0.10.0`" + pr_number: 2386 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "397a20d6dcf413ad66c248a8427c580f30e2a783" + date: "2020-04-21 16:03:39 +0000" + description: "carve a seam around leveldb" + pr_number: 2363 + scopes: ["buffers"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 172 + deletions_count: 109 + }, { + sha: "5ad6db51805e1225a6ecf3dfed4ce13a63ab587b" + date: "2020-04-21 17:29:31 +0000" + description: "Correct timer example errors" + pr_number: 2391 + scopes: ["lua transform"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 11 + insertions_count: 139 + deletions_count: 183 + }, { + sha: "bcf1bfd2ea371d4414f9762790b22efcd1fc00b9" + date: "2020-04-21 22:05:58 +0000" + description: "Add authentication section" + pr_number: 2399 + scopes: ["gcp provider"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 72 + deletions_count: 14 + }, { + sha: "21ff9b55f6a9298889bb8b0c4f440282f1fb9197" + date: "2020-04-22 17:21:29 +0000" + description: "Upgrade `service` and `retries` in sink util" + pr_number: 2362 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 7 + insertions_count: 598 + deletions_count: 125 + }, { + sha: "632f62c61defb7970b5d29dc11050a595e8848b2" + date: "2020-04-22 17:59:33 +0000" + description: "Schema Guide" + pr_number: 1745 + scopes: ["config"] + type: "docs" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 683 + deletions_count: 0 + }, { + sha: "14c9ae89bd6204ceb952f871e380507d06792f86" + date: "2020-04-23 15:27:51 +0000" + description: "Upgrade rustc 1.43.0" + pr_number: 2422 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "e20ed1d5b9638cef0eb6392d0c485b20a0b312cb" + date: "2020-04-24 12:08:18 +0000" + description: "Cancel previous builds automatically" + pr_number: 2442 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 19 + deletions_count: 3 + }, { + sha: "55d8c9f1dabf75ce7b198ef26761d0707d4b2b6b" + date: "2020-04-24 16:34:38 +0000" + description: "Optimize CI test workflow with slim builds" + pr_number: 
2440 + scopes: ["operations"] + type: "feat" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 147 + deletions_count: 89 + }, { + sha: "86d8f7f68bd55b30f817a938e8511ae35642906a" + date: "2020-04-26 14:38:17 +0000" + description: "Stylize tests/Makefile" + pr_number: 2450 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 90 + deletions_count: 80 + }, { + sha: "2593a8a627985c9a78daa99d75f53ab4b43395e7" + date: "2020-04-26 14:49:47 +0000" + description: "Use specific target names in Makefile" + pr_number: 2451 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 645 + deletions_count: 621 + }, { + sha: "509567996e2fb0b0454fb7b317785065db4fcc97" + date: "2020-04-26 15:17:34 +0000" + description: "Allow the invocation of local script from the tests/Makefile" + pr_number: 2452 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 59 + deletions_count: 5 + }, { + sha: "980798ab0a94fe58097930d1d6060fe762261b17" + date: "2020-04-26 18:24:35 +0000" + description: "Point CI GHA workflow to tests/Makefile" + pr_number: 2453 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 18 + insertions_count: 377 + deletions_count: 198 + }, { + sha: "c2d947b60aa3990ba9a3d9dcb84707f7d39a693c" + date: "2020-04-26 19:30:15 +0000" + description: "Add missing loki integration test" + pr_number: 2457 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 23 + deletions_count: 0 + }, { + sha: "ce7699a34078355ee54bccbea1aba039545c5147" + date: "2020-04-26 20:34:58 +0000" + description: "Add aws-integration-tests & gcp-integration-tests feature" + pr_number: 2458 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 10 + insertions_count: 23 + deletions_count: 26 + }, { + sha: "874f36d2d6dec52d74b43818d144393e678d7a0f" + date: "2020-04-26 23:57:56 +0000" + description: "Point nightly GHA workflow to tests/Makefile" + pr_number: 2456 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 224 + deletions_count: 127 + }, { + sha: "0ff1bf36f313e9a0498b2e6a95d7860e918d886e" + date: "2020-04-27 01:14:25 +0000" + description: "Remove old kubernetes tests" + pr_number: 2462 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 1 + deletions_count: 155 + }, { + sha: "a38ca109ded52378f89c25d61673ad2af1fd767a" + date: "2020-04-27 08:52:11 +0000" + description: "Move /tests/Makefile to /Makefile" + pr_number: 2460 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 11 + insertions_count: 279 + deletions_count: 444 + }, { + sha: "cd1d3b752b0d7abdecfa0e77b59dc33150a18908" + date: "2020-04-27 11:26:42 +0000" + description: "Ignore `RUSTSEC-2020-0014`" + pr_number: 2465 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "65d3973ecec71343f03038d5b23acf64dcc07f25" + date: "2020-04-27 11:54:00 +0000" + description: "Add rpm builds to nightly workflow" + pr_number: 2463 + scopes: ["operations"] + type: "chore" + 
breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 47 + deletions_count: 0 + }, { + sha: "5d40749dc25fcf4b62e8dc30dc9e3bd0865ced6a" + date: "2020-04-27 12:40:03 +0000" + description: "Upgrade `snafu` to `0.6`" + pr_number: 2466 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 11 + deletions_count: 19 + }, { + sha: "03c1947e025fbe01aec1d85071520087a29e0bc7" + date: "2020-04-27 14:14:18 +0000" + description: "Fix unit test failure in CI" + pr_number: 2470 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0f3c187af8656e47cbc228ab2db52683248cae25" + date: "2020-04-27 22:21:38 +0000" + description: "Fix typo at nightly CI workflow" + pr_number: 2472 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "a8b631eecdb0b73942b1880a6322787a8c838b1c" + date: "2020-04-27 14:59:45 +0000" + description: "add new isolated buffer benchmarks" + pr_number: 2447 + scopes: ["buffers"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 209 + deletions_count: 5 + }, { + sha: "f3a5814a97f9cca746bc845709372659a7739d72" + date: "2020-04-27 22:28:48 +0000" + description: "Fix nightly builds" + pr_number: 2471 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 258 + deletions_count: 158 + }, { + sha: "d0d2d53f55bae6800fa200d4e567d25f11b0e80e" + date: "2020-04-28 13:47:20 +0000" + description: "add profiling instructions" + pr_number: 2416 + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 77 + deletions_count: 0 + }, { + sha: "c39b89fe13b23b9f7698b8fb774376b2720feffc" + date: "2020-04-28 13:43:08 +0000" + description: "Encoding config accessor visibility reduction" + pr_number: 2153 + scopes: [] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 17 + insertions_count: 411 + deletions_count: 354 + }, { + sha: "6b44597fa84ea4efe5835837f445cd9cae0e386f" + date: "2020-04-29 15:25:17 +0000" + description: "Fix local unit test execution" + pr_number: 2490 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Matthias" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "338c83d091452804206e141a0001757ad1db8b78" + date: "2020-04-29 16:53:09 +0000" + description: "Fix some typos in contribution documentation" + pr_number: 2494 + scopes: [] + type: "docs" + breaking_change: false + author: "Matthias" + files_count: 1 + insertions_count: 10 + deletions_count: 11 + }, { + sha: "89edb5acc50a13dccbd985dce33dcfab51c8556a" + date: "2020-04-29 11:46:53 +0000" + description: "Update `request.retry_limit` default to 18446744073709551615" + pr_number: 2498 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 66 + insertions_count: 346 + deletions_count: 285 + }, { + sha: "fa371f5c3a4835f5169eb380e5dec290b718682a" + date: "2020-04-29 20:35:37 +0000" + description: "Prevent docker from creating directories and files as root" + pr_number: 2486 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 6 + insertions_count: 85 + deletions_count: 15 + }, { + sha: 
"58a551d5487e871b4116610330f4afa471c0a3f4" + date: "2020-04-29 12:40:52 +0000" + description: "Add `overwrite` option" + pr_number: 2485 + scopes: ["add_fields transform", "add_tags transform"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 9 + insertions_count: 177 + deletions_count: 21 + }, { + sha: "3905c2803f9c409d751f98bc28653fa228df3b01" + date: "2020-04-30 11:36:12 +0000" + description: "Finish git merge" + pr_number: 2502 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 0 + deletions_count: 8 + }, { + sha: "0903f68eb6a96dcf164ef55dddccb20f8f14d0df" + date: "2020-04-30 10:14:10 +0000" + description: "Fix batch options" + pr_number: 2397 + scopes: ["aws_cloudwatch_logs sink"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 17 + deletions_count: 17 + }, { + sha: "cfc418e69476e383c8786c3de8191a5a7eaeb732" + date: "2020-04-30 12:27:19 +0000" + description: "Temporarily remove Kubernetes components to clear the way for the new integration" + pr_number: 2488 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 15 + insertions_count: 1 + deletions_count: 3599 + }, { + sha: "ed8abcd2dbf17b95cb015a345d4bbe46af33e0ba" + date: "2020-04-30 10:41:14 +0000" + description: "RFC #1858 - 2020-04-06 - Automatic rate limit adjustment" + pr_number: 2329 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 327 + deletions_count: 0 + }, { + sha: "fb489a72543b986dcb5a9420581ed3d203eaecb0" + date: "2020-04-30 13:41:06 +0000" + description: "Kubernetes Integration RFC" + pr_number: 2222 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 4429 + deletions_count: 0 + }, { + sha: "0d4d6b6e0579e5562591b06193aaae1ea41caf13" + date: "2020-05-01 18:35:38 +0000" + description: "Add retries to the verification of the release artifacts" + pr_number: 2509 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 35 + deletions_count: 9 + }, { + sha: "9fd3831ef66bd28c8a794507b346ed832c9b5aa6" + date: "2020-05-01 19:34:40 +0000" + description: "RPM build fix" + pr_number: 2522 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 32 + deletions_count: 14 + }, { + sha: "7d33ddc9ee4e59da09c847f3c5492609e4e50354" + date: "2020-05-01 20:20:30 +0000" + description: "do not create /LICENSE and /README.md at RPM package" + pr_number: 2524 + scopes: ["rpm platform"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 4 + }, { + sha: "e01fc0b69c335efb426aaee30dc9bec442c99ea2" + date: "2020-05-01 20:54:20 +0000" + description: "Dynamically derive the list of paths to create at target" + pr_number: 2525 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 22 + }, { + sha: "8d1f6205e1b262d6111b1d27df0c1f95fe7b6563" + date: "2020-05-01 14:12:17 +0000" + description: "Run entire healthcheck lazily" + pr_number: 2501 + scopes: ["kafka sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 3 + deletions_count: 1 + }, { + sha: "2937c46e0f8f061c120a8c21840cc3101434266c" 
+ date: "2020-05-01 14:12:35 +0000" + description: "Properly detect shutdown" + pr_number: 2429 + scopes: ["socket sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 142 + deletions_count: 6 + }, { + sha: "105ab797c02e0b6787fd56667d020beb80df3e88" + date: "2020-05-01 14:37:02 +0000" + description: "Align Github Actions workflows with the files changed" + pr_number: 2517 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 130 + deletions_count: 99 + }, { + sha: "4b0da62cdd66a464ba9479f012cda2f4f16e2a9f" + date: "2020-05-03 22:43:00 +0000" + description: "replace OrigID with OrgID" + pr_number: 2536 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Alexandre NICOLAIE" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1fc8869ac1f58f20ef3b53186bfb1ae4f5506383" + date: "2020-05-04 22:46:16 +0000" + description: "Iterate over target dirs to avoid missing mapall command on macOS" + pr_number: 2539 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Matthias" + files_count: 1 + insertions_count: 10 + deletions_count: 6 + }, { + sha: "e888f3b12f21d5d7d7c50f8bde893950ca631105" + date: "2020-05-04 23:33:17 +0000" + description: "Shutdown Vector if all sources finish" + pr_number: 2533 + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 70 + deletions_count: 6 + }, { + sha: "d187a7b4cf6989b5aa1a251a1c9f736c44983675" + date: "2020-05-04 16:43:21 +0000" + description: "add batching to disk buffer reads" + pr_number: 2481 + scopes: ["buffers"] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 16 + deletions_count: 9 + }, { + sha: "ec2ec165b95b3022ca4615fb2eb29b5953ad1a29" + date: "2020-05-04 16:26:33 +0000" + description: "Add support for negating conditions in check_fields" + pr_number: 2514 + scopes: ["filter transform", "swimlanes transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 161 + deletions_count: 4 + }, { + sha: "a9090b6bc5a8bcd4ce2928eb588e3b1da8e8e9ba" + date: "2020-05-05 11:01:46 +0000" + description: "Upgrade deny action" + pr_number: 2544 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "b5c6bbe5700be1084c32b84d58a1a63f21ce816b" + date: "2020-05-05 11:20:28 +0000" + description: "Fix rustfmt" + pr_number: 2545 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "61c03b65118359e1fd729a0e23f5925c5306f87b" + date: "2020-05-05 09:32:16 +0000" + description: "Add explicit AWS region" + pr_number: 2532 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 134 + deletions_count: 21 + }, { + sha: "4089b39a6c06b54a2d62d141d70230ed140e9b83" + date: "2020-05-05 15:10:39 +0000" + description: "Upgrade openssl to `1.1.1g`" + pr_number: 2546 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "70085092e04cf35315375cf1083c363f8677ca9e" + date: "2020-05-05 15:57:40 +0000" + 
description: "Disable `check-version` check" + pr_number: 2548 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 6 + deletions_count: 5 + }, { + sha: "3e4360a2db28b1837e49933e3a6a23b29f97b136" + date: "2020-05-05 23:32:12 +0000" + description: "Add shellcheck to CI and correct scripts" + pr_number: 2482 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 49 + insertions_count: 419 + deletions_count: 360 + }, { + sha: "a084309ba69e09e415dbb2eccdbcddd03438c9b8" + date: "2020-05-06 02:00:58 +0000" + description: "Remove the .github/workflows/install-script.yml" + pr_number: 2549 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 0 + deletions_count: 17 + }, { + sha: "a61ab97208c5b40c4cc242ac69d395e11b263838" + date: "2020-05-05 17:32:16 +0000" + description: "Add support for include/exclude units" + pr_number: 2540 + scopes: ["journald source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 195 + deletions_count: 58 + }, { + sha: "746792f79f64316214de9fee8d7ba6dfaf6f6cae" + date: "2020-05-06 10:18:04 +0000" + description: "Wire `ShutdownSignal`" + pr_number: 2366 + scopes: ["stdin source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 111 + deletions_count: 40 + }, { + sha: "505b688fa0c596d77e8de5c870894f399b818c51" + date: "2020-05-06 12:24:51 +0000" + description: "Use post requests" + pr_number: 2547 + scopes: ["datadog_metrics sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 67 + deletions_count: 10 + }, { + sha: "4026a74c7a7bfb1986c8bcb5d13600c937715290" + date: "2020-05-06 16:09:56 +0000" + description: "Upgrade internal rusoto utilities" + pr_number: 2555 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 292 + deletions_count: 2 + }, { + sha: "a2681967063bedfe5eb9f0754b7e197ad14bc885" + date: "2020-05-07 15:21:57 +0000" + description: "Kubernetes test procedure" + pr_number: 2487 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 9 + insertions_count: 315 + deletions_count: 8 + }, { + sha: "3a2b5264eaaebf18edb50a4f8a05b59d91ec7aff" + date: "2020-05-07 16:00:08 +0000" + description: "Upgrade to rustc `1.43.1`" + pr_number: 2563 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "234045737b7e695984a7128a3f6afa1e81f120f6" + date: "2020-05-11 17:42:16 +0000" + description: "Set minimal supported Kubernetes version" + pr_number: 2510 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 7 + deletions_count: 0 + }, { + sha: "2bb9384585dc7b75b77ec08e594e9fa1b742b99b" + date: "2020-05-11 19:22:08 +0000" + description: "Send time in . 
format" + pr_number: 2570 + scopes: ["splunk_hec sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 54 + deletions_count: 24 + }, { + sha: "bda112212cc31cf4ee00788d8199158c78a413d3" + date: "2020-05-11 19:24:19 +0000" + description: "Improve check-advisories CI job" + pr_number: 2571 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "184ee0cc29a9870d8cef7d5158293299833242c7" + date: "2020-05-11 20:57:38 +0000" + description: "Force disable k8s tests in CI" + pr_number: 2574 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "c35a0e9c8d31bd77cd8850523c5486ce48535f40" + date: "2020-05-11 17:09:12 +0000" + description: "Add `BatchedHttpSink` back to `http2`" + pr_number: 2573 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 138 + deletions_count: 1 + }, { + sha: "a43aaaae839bd4c4b6ba0e5e83da3a32793872fd" + date: "2020-05-11 19:51:08 +0000" + description: "Upgrade `clickhouse`" + pr_number: 2578 + scopes: ["clickhouse sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 34 + deletions_count: 31 + }, { + sha: "8d2a54f71b3ef6004c2902e55530f9eb58c8ea3f" + date: "2020-05-12 17:27:49 +0000" + description: "Copying docker images directly to minikube VMs" + pr_number: 2575 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 76 + deletions_count: 11 + }, { + sha: "1b732b42b159930bb0d3db5d7df1337596768fdc" + date: "2020-05-12 15:31:08 +0000" + description: "remove superfluous warning" + pr_number: 2583 + scopes: ["humio_logs sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "4392307dee63d0d6852d8ed13bd68b87a5b0898d" + date: "2020-05-12 16:00:10 +0000" + description: "remove unused import" + pr_number: 2582 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9fa5847b281a94d1b0c4e1b6031674f9fa9b0d6a" + date: "2020-05-12 20:44:36 +0000" + description: "remove id key from body" + pr_number: 2581 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 17 + deletions_count: 14 + }, { + sha: "2c8a818aec48d3eab662a24245e006e157b7643a" + date: "2020-05-13 08:34:58 +0000" + description: "Fix `units`/`include_units` switch " + pr_number: 2567 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "0e878251716da1434e32320a835b3d3763f580fb" + date: "2020-05-13 08:50:10 +0000" + description: "Test `vector` shutdown" + pr_number: 2558 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 13 + insertions_count: 497 + deletions_count: 24 + }, { + sha: "a61e0d5e947b9ee85b8778bb362c220146705777" + date: "2020-05-13 11:29:53 +0000" + description: "Mark GeneratorConfig::repeat with #[allow(dead_code)]" + pr_number: 2586 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" 
+ files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "aa67dfe65e33bbf8e686add0ee0a5645a5f13f0f" + date: "2020-05-13 13:03:10 +0000" + description: "Update shutdown deadlines" + pr_number: 2534 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 11 + deletions_count: 12 + }, { + sha: "6858984ca4fb107ee05eaa9bd008747514f6b4de" + date: "2020-05-13 13:53:40 +0000" + description: "Use `localhost` for `prometheus` shutdown test" + pr_number: 2589 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 7 + deletions_count: 2 + }, { + sha: "b26a0e984aa5955743c1c0d6cd53c383a58534d3" + date: "2020-05-13 18:10:10 +0000" + description: "Generalize file server around paths provider" + pr_number: 2541 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 6 + insertions_count: 168 + deletions_count: 89 + }, { + sha: "c4e6de5b4cffc4aeee6c8c5cd4129fd110a0fc20" + date: "2020-05-13 09:59:26 +0000" + description: "Fix possible infinite loop decoding too-long lines" + pr_number: 2587 + scopes: ["socket sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 45 + deletions_count: 28 + }, { + sha: "0cdc500049ac6f587767105ce60bbc9a30458305" + date: "2020-05-13 21:50:14 +0000" + description: "Add RegexSet support to regex" + pr_number: 2493 + scopes: ["regex_parser transform"] + type: "enhancement" + breaking_change: false + author: "Matthias" + files_count: 8 + insertions_count: 161 + deletions_count: 55 + }, { + sha: "d93eea0e37af25f5a650f9497f2c636d6228efff" + date: "2020-05-13 23:07:42 +0000" + description: "Fix default values for buffer" + pr_number: 2590 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 62 + deletions_count: 2 + }, { + sha: "3f0012e789b0b9a00d5c3a9c6994110cef64dbef" + date: "2020-05-13 16:13:53 +0000" + description: "Allow removed keys to be used for partitioning" + pr_number: 2591 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 25 + deletions_count: 1 + }, { + sha: "484ba4cda42db38330698eacd05c781e212861d6" + date: "2020-05-13 18:38:34 +0000" + description: "log parse errors as warnings" + pr_number: 2593 + scopes: ["json_parser transform"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "97b589ab5f54393c1919cf644fad814ab705325b" + date: "2020-05-14 03:32:32 +0000" + description: "Mute k8s tests" + pr_number: 2594 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "0a0f8d5b6478237d7402ca9e356c9ff4ea6f8497" + date: "2020-05-14 16:36:00 +0000" + description: "Fix newline for check-fmt" + pr_number: 2602 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c02a0f77d328e1d4fee02095a2f1abfb4c3c09a7" + date: "2020-05-14 15:25:22 +0000" + description: "Add support for templates in the topic config" + pr_number: 2598 + scopes: ["kafka sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 25 + deletions_count: 8 + }, { + sha: 
"c39377cebd7809c25940d2578f49571735ac4aae" + date: "2020-05-15 18:13:38 +0000" + description: "Error on auth section and Auth header" + pr_number: 2611 + scopes: ["http sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 34 + deletions_count: 4 + }, { + sha: "43af8d01f6c966a65c35f12b2ef7d309ecb4c326" + date: "2020-05-15 14:02:07 +0000" + description: "Enable tls by default" + pr_number: 2527 + scopes: ["datadog_logs sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 13 + deletions_count: 15 + }, { + sha: "1eaee144b7be0da821c172486f3079f8e5e7c5a4" + date: "2020-05-15 12:13:04 +0000" + description: "Fixup loki tests" + pr_number: 2613 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "e5e5b06c7ec80905302b73407f1255a9aac4a5ea" + date: "2020-05-18 17:53:45 +0000" + description: "Fix invalid link in JSON Parser transform" + pr_number: 2624 + scopes: [] + type: "docs" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 3 + deletions_count: 5 + }, { + sha: "f758963ad28a698a35d772ea8d0cea3d9c2ef46c" + date: "2020-05-18 19:17:37 +0000" + description: "Build archives, DEB, and RPM packages with glibc" + pr_number: 2518 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 13 + insertions_count: 136 + deletions_count: 74 + }, { + sha: "6281932469d9bcef34edaf672b52edc804d49732" + date: "2020-05-19 11:16:28 +0000" + description: "Remove race between sink and source" + pr_number: 2619 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 45 + deletions_count: 18 + }, { + sha: "b2eaa5673caa2cead25e47b1c0536b6894433077" + date: "2020-05-20 00:55:27 +0000" + description: "Replace http:// to https://" + pr_number: 2646 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 45 + insertions_count: 88 + deletions_count: 88 + }, { + sha: "6d420f852e959f822c60c09b5466317d3438f62d" + date: "2020-05-20 19:04:10 +0000" + description: "Fix the bug with channel closing at file server" + pr_number: 2652 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 31 + deletions_count: 17 + }, { + sha: "741690a7e8dc50f9aecf5fe451d1a1fa4b0046c0" + date: "2020-05-20 23:25:10 +0000" + description: "Instrument few more components with metrics" + pr_number: 2620 + scopes: ["internal_metrics source"] + type: "feat" + breaking_change: false + author: "Alex" + files_count: 9 + insertions_count: 158 + deletions_count: 9 + }, { + sha: "6a4ac1723b845c083d33cfe6eb683c6e7531ffee" + date: "2020-05-20 15:03:19 +0000" + description: "Fix the Kafka TLS key parameter handling" + pr_number: 2658 + scopes: ["kafka sink", "kafa source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 22 + deletions_count: 2 + }, { + sha: "9f9ec375f5f4a2947041f9c3528f13ab17ac1e2b" + date: "2020-05-20 17:23:53 +0000" + description: "add bearer auth strategy" + pr_number: 2607 + scopes: ["http sink"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 9 + insertions_count: 149 + deletions_count: 14 + }, { + sha: "4a5c201428e20e4ab3aed7f88e7e124124a64719" + date: "2020-05-21 01:42:38 
+0000" + description: "Check if `transform`/`sink` is present" + pr_number: 2617 + scopes: ["topology"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 110 + deletions_count: 13 + }, { + sha: "5d993333eb67bc05996cf6d77e94296a27bc537e" + date: "2020-05-21 17:46:21 +0000" + description: "Add uint type" + pr_number: 2659 + scopes: [] + type: "docs" + breaking_change: false + author: "Kirill Fomichev" + files_count: 67 + insertions_count: 586 + deletions_count: 585 + }, { + sha: "1d8e88057f68d9cf9292ddc9edb69a7f8d3b3f92" + date: "2020-05-22 02:13:16 +0000" + description: "Debug package-deb" + pr_number: 2665 + scopes: ["dpkg platform"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "e0761a9681da6b7e8cb69c750c41a7be89d24fed" + date: "2020-05-24 20:28:28 +0000" + description: "Shutdown starting from `tcp` and `unix` sockets " + pr_number: 2618 + scopes: ["sources"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 10 + insertions_count: 366 + deletions_count: 101 + }, { + sha: "2a4181c6c65ad1606d31c5fe9408964656a3bc7e" + date: "2020-05-25 16:53:32 +0000" + description: "Improve compression option" + pr_number: 2637 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 33 + insertions_count: 315 + deletions_count: 271 + }, { + sha: "54ab97f632b0054e464426c9a088d492dec8d0ec" + date: "2020-05-25 07:37:03 +0000" + description: "Rate limit TCP connection errors, downgrade their severity" + pr_number: 2669 + scopes: ["vector source"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "fcff9e8989ef8c1f7ed69fad6fee23d4cb9d4d31" + date: "2020-05-25 16:41:17 +0000" + description: "Avoid hanging on timers" + pr_number: 2672 + scopes: ["lua transform"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 120 + deletions_count: 4 + }, { + sha: "b88207cbb5d333e418fdcbe42e9a0bb1e390c6ab" + date: "2020-05-25 16:44:56 +0000" + description: "Initial `influxdb_logs` sink implementation" + pr_number: 2474 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Jakub Bednář" + files_count: 41 + insertions_count: 5582 + deletions_count: 1447 + }, { + sha: "5904a3e2e6011b1bc616ef236528d3c320f04a3a" + date: "2020-05-25 20:09:38 +0000" + description: "Fix buffer compression" + pr_number: 2679 + scopes: ["influxdb_logs sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 10 + deletions_count: 3 + }, { + sha: "77002bbacde7562bc78843eb3a831e3ed5a657eb" + date: "2020-05-26 02:37:36 +0000" + description: "Display free space at package deb task" + pr_number: 2686 + scopes: ["dpkg platform"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "b0746e5f8d304bcc23a0b2121239e68263da667f" + date: "2020-05-26 13:36:27 +0000" + description: "bump activesupport from 6.0.2.1 to 6.0.3.1 in /scripts" + pr_number: 2689 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot[bot]" + files_count: 1 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "ad5575a94a8dbf4a0c0f408bfe4b9160450818ef" + date: "2020-05-27 20:39:29 +0000" + description: "Upgrade `HttpSink` based sinks" + pr_number: 2688 
+ scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 20 + insertions_count: 324 + deletions_count: 300 + }, { + sha: "c4787acb8846f2492c1a0f923ea4acc0823112d2" + date: "2020-05-27 11:58:19 +0000" + description: "Add support for loading multiple CAs" + pr_number: 2616 + scopes: ["security"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 286 + deletions_count: 73 + }, { + sha: "29735f052cbea36db06d6db4db137114294ad50c" + date: "2020-05-27 14:05:25 +0000" + description: "Three KafkaCompression typos in tests" + pr_number: 2694 + scopes: ["kafka sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "92ce87953af8d5f235187b7ee6c8309d4d4d623f" + date: "2020-05-28 01:37:43 +0000" + description: "Fix default host value" + pr_number: 2683 + scopes: ["humio_logs sink"] + type: "docs" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "a7d760307f6126c8c24a8cd43fe07f46c3fcc637" + date: "2020-05-28 17:18:16 +0000" + description: "Cleanup the leftovers of the old k8s implementation" + pr_number: 2676 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 9 + insertions_count: 5 + deletions_count: 133 + }, { + sha: "19dd0a0589faf976c831fd3f9e8d961e3064e47d" + date: "2020-05-28 11:16:31 +0000" + description: "RFC #2341 - 2020-04-15 - WASM Plugins Support" + pr_number: 2341 + scopes: [] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 1388 + deletions_count: 0 + }, { + sha: "cacfb9319adb0d802894b1f187367a18f26d8773" + date: "2020-05-28 15:16:54 +0000" + description: "Reorder Tower service layers" + pr_number: 2703 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "c86d9a095234af0a08d9d2684b2eb9b84cedf3a8" + date: "2020-05-29 01:35:55 +0000" + description: "A script to install git hooks" + pr_number: 2650 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 16 + deletions_count: 1 + }, { + sha: "f6e0eb2eb87fe874d56261d61522fdc2928d8966" + date: "2020-05-28 15:52:48 +0000" + description: "Add `hostname` and `source_ip`" + pr_number: 2663 + scopes: ["syslog source"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 4 + insertions_count: 93 + deletions_count: 204 + }, { + sha: "1fbd2ffd87125390a2441b14bb5efbda6c2e1e92" + date: "2020-05-29 08:59:51 +0000" + description: "Add dev workflow simplification rfc" + pr_number: 2685 + scopes: ["platforms"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 253 + deletions_count: 0 + }, { + sha: "ce25c9446c7326dd20ebef88089ea59ab98d7b71" + date: "2020-05-29 12:49:18 +0000" + description: "Update syslog_loose to 0.3.0" + pr_number: 2709 + scopes: ["syslog source"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "8e8339fb8682e49c8b64bd6db1b6a3bfac8100bb" + date: "2020-06-02 17:07:03 +0000" + description: "Fix data_dir default value in global options" + pr_number: 2720 + scopes: [] + type: "docs" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 
9 + deletions_count: 6 + }, { + sha: "94c6bdc7ff2013a17cdd07ca8eff039891c5f028" + date: "2020-06-02 18:22:22 +0000" + description: "Remove extra slash on Uri join" + pr_number: 2711 + scopes: ["splunk_hec sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 17 + deletions_count: 10 + }, { + sha: "ac62a2ac65ea6a60cfde3debb312f2528a7f1e0b" + date: "2020-06-02 19:02:15 +0000" + description: "Use `test_util::runtime()` in tests" + pr_number: 2715 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 31 + insertions_count: 145 + deletions_count: 147 + }, { + sha: "3c3b8021e49a445f0c7d69b2e21b227be65cd075" + date: "2020-06-02 19:07:37 +0000" + description: "Test with `default` feature in CI" + pr_number: 2721 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 46 + deletions_count: 3 + }, { + sha: "1c27da72c959419c36f44ed1aa53a1c1668bc196" + date: "2020-06-02 18:21:13 +0000" + description: "Fix check-markdown job" + pr_number: 2727 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 29 + deletions_count: 16 + }, { + sha: "724e830f2085c58101ad3160dffee3a609f6c7d6" + date: "2020-06-02 16:25:15 +0000" + description: "Add support for inline TLS keys" + pr_number: 2724 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 28 + insertions_count: 800 + deletions_count: 715 + }, { + sha: "f697d46f5b6b2bd69f42721d4f59e7752e454346" + date: "2020-06-02 18:26:54 +0000" + description: "Split website workflow into generate and blog" + pr_number: 2728 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 31 + deletions_count: 10 + }, { + sha: "d5e988a1bd402a84d736de3b61594c43a9a646c5" + date: "2020-06-03 02:34:52 +0000" + description: "Add compression to humio and new_relic" + pr_number: 2682 + scopes: ["humio sink", "new relic sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 7 + insertions_count: 112 + deletions_count: 2 + }, { + sha: "12480b546614824a06e4415b723923e5e73dd9df" + date: "2020-06-04 22:23:06 +0000" + description: "Only log `200`-`299` responses as successful" + pr_number: 2714 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 8 + insertions_count: 52 + deletions_count: 15 + }, { + sha: "35b7e563faca817950dd947803f606f0a4fc4d3e" + date: "2020-06-05 19:34:36 +0000" + description: "Fix wrong example in tokenizer module (version 0.9.1)" + pr_number: 2716 + scopes: [] + type: "docs" + breaking_change: false + author: "xluffy" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "4c77fc18af8db47db6138f149b45b1d4ced4354b" + date: "2020-06-05 18:10:49 +0000" + description: "Fix integration test" + pr_number: 2698 + scopes: ["kafka sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "54093b76eb06a077b70c0f1d98b53f5c85cd981b" + date: "2020-06-05 11:11:42 +0000" + description: "Add new EncodingConfig" + pr_number: 2671 + scopes: ["pulsar sink"] + type: "fix" + breaking_change: false + author: "Evan Cameron" + files_count: 1 + insertions_count: 14 + deletions_count: 11 + }, { + sha: 
"57c3d91abbf5fc33d4473d7557d5b1248b7620bf" + date: "2020-06-07 19:36:18 +0000" + description: "Upgrade `hyper`" + pr_number: 2754 + scopes: ["prometheus sink"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 25 + deletions_count: 19 + }, { + sha: "88aac3c6daf0480d2a4f03b0957462a2b06cb44a" + date: "2020-06-07 21:49:37 +0000" + description: "Add into" + pr_number: 2759 + scopes: ["pulsar sink"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0d750f0804beccd59533a5ded16d24be7e9efd77" + date: "2020-06-08 00:57:27 +0000" + description: "Upgrade `hyper`" + pr_number: 2761 + scopes: ["splunk_hec source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 11 + deletions_count: 11 + }, { + sha: "c56b365d53c161dd499c41f04bd15bb7c08a4d8b" + date: "2020-06-08 10:24:23 +0000" + description: "Upgrade `hyper`" + pr_number: 2758 + scopes: ["gcp_cloud_storage sink"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 106 + deletions_count: 39 + }, { + sha: "ee86731a755abe3c5d119f78dd89bae0eaaca6e8" + date: "2020-06-08 17:20:48 +0000" + description: "Updade rusoto" + pr_number: 2735 + scopes: ["aws_kinesis_firehose sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 8 + insertions_count: 628 + deletions_count: 166 + }, { + sha: "3d5177cc4dd28df31fc38c39287177cadaa1f9d0" + date: "2020-06-08 11:28:25 +0000" + description: "increment request id" + pr_number: 2765 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "569c066397d51d6be903fc5a7567c9619b705f9a" + date: "2020-06-08 11:07:29 +0000" + description: "Handle templates with invalid strftime elements" + pr_number: 2737 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 12 + insertions_count: 183 + deletions_count: 92 + }, { + sha: "654a5addb3808aac7a117b77cafac7785d2dcd3f" + date: "2020-06-08 20:26:38 +0000" + description: "Make throttling errors retryable" + pr_number: 2752 + scopes: ["aws_cloudwatch_sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "4ff1774a10a9a4ace5cdb14977d5aeef0bb20ea5" + date: "2020-06-08 22:29:51 +0000" + description: "Add clippy to CI" + pr_number: 2576 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 15 + insertions_count: 130 + deletions_count: 3 + }, { + sha: "d6962ee12e60944be5501409a8b4443b33b694d8" + date: "2020-06-09 00:16:30 +0000" + description: "Upgrade `hyper`" + pr_number: 2760 + scopes: ["influxdb_metrics sink"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 27 + deletions_count: 23 + }, { + sha: "23ba0dc395c5af4b64babb0e27a308d8b246c85d" + date: "2020-06-08 18:17:25 +0000" + description: "bump websocket-extensions from 0.1.3 to 0.1.4 in /website" + pr_number: 2756 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot[bot]" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "3163beea3722a8ef765ce90524c330a6f6200988" + date: "2020-06-09 12:34:35 +0000" + description: "Upgrade `hyper`" + pr_number: 2762 
+ scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 8 + insertions_count: 115 + deletions_count: 131 + }, { + sha: "6502b1f811d1f35b3694a9cf911681d280491a1c" + date: "2020-06-09 18:12:34 +0000" + description: "Upgrade `hyper`" + pr_number: 2763 + scopes: ["aws_ec2_metadata transform"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 25 + deletions_count: 15 + }, { + sha: "d36c0a42407e1db52022b28dd4fa0d3030f5899a" + date: "2020-06-09 18:16:07 +0000" + description: "add pipeline config" + pr_number: 2734 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "francesco palazzini" + files_count: 4 + insertions_count: 61 + deletions_count: 1 + }, { + sha: "2132d6ee5de752a18c559c2d30f736b412fb8514" + date: "2020-06-10 12:25:08 +0000" + description: "Consolidate and beautify `validate` " + pr_number: 2622 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 14 + insertions_count: 500 + deletions_count: 266 + }, { + sha: "fd1ddefd4304de3f16542cc871ec8f65f13b2535" + date: "2020-06-10 10:44:34 +0000" + description: "Drop excessive large events" + pr_number: 2770 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 37 + deletions_count: 24 + }, { + sha: "6137b3d82599aa41a653c3bd2c61b8733f8df1ac" + date: "2020-06-10 21:30:06 +0000" + description: "Convert pipe to variables to debug 141 (broken pipe) error" + pr_number: 2772 + scopes: ["dpkg platform"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 5 + deletions_count: 4 + }, { + sha: "5706d2a7076ebee4d10b8d5caf8d90fb68ddda4a" + date: "2020-06-11 10:06:49 +0000" + description: "Introduce WASM Plugins" + pr_number: 2006 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 105 + insertions_count: 4214 + deletions_count: 58 + }, { + sha: "d518c29dd244e60c5c2a940670104c4259693d4a" + date: "2020-06-11 14:32:59 +0000" + description: "upgrade tower-limit to fix rate limiting" + pr_number: 2779 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "71f0d07ae0a39940c3c7c0ef90dc55c157343485" + date: "2020-06-11 14:54:36 +0000" + description: "Introduce Dev Workflow improvements" + pr_number: 2723 + scopes: ["operations"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 46 + insertions_count: 1395 + deletions_count: 846 + }, { + sha: "03ca67662342a43d9fb2a83c301464747742ac05" + date: "2020-06-12 18:10:59 +0000" + description: "Log when components finish normally" + pr_number: 2801 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 5 + deletions_count: 3 + }, { + sha: "e7e1b39c75992d406cb90cfeafce5bdef8625e3e" + date: "2020-06-12 22:17:58 +0000" + description: "fix aws integration tests ci" + pr_number: 2792 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 4 + insertions_count: 35 + deletions_count: 33 + }, { + sha: "a1d8d8de56f3b63d4aae41536d17914792039695" + date: "2020-06-13 10:36:01 +0000" + description: "tower update" + pr_number: 2805 + scopes: ["statsd sink"] + type: "chore" + breaking_change: false + author: 
"Kirill Fomichev" + files_count: 1 + insertions_count: 24 + deletions_count: 17 + }, { + sha: "61599531c8b2b7f4f1d9e01bd5126820b87a171a" + date: "2020-06-13 10:42:21 +0000" + description: "update rusoto to 0.44.0" + pr_number: 2806 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 359 + deletions_count: 336 + }, { + sha: "9efa014dd303301640f73fc930ef939a42fdb666" + date: "2020-06-13 19:56:02 +0000" + description: "Don't finish when timers finish" + pr_number: 2809 + scopes: ["lua transform"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 40 + deletions_count: 4 + }, { + sha: "c71b5b55ae00ddbd888fbae78c479f261c2e305a" + date: "2020-06-14 18:28:03 +0000" + description: "Update hyper" + pr_number: 2808 + scopes: ["prometheus source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 32 + deletions_count: 23 + }, { + sha: "1d3fa878a8ac3258cfa3dffe550cb850b6a1e656" + date: "2020-06-15 13:17:12 +0000" + description: "Update hyper in tests" + pr_number: 2817 + scopes: ["prometheus source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 23 + deletions_count: 15 + }, { + sha: "55587bebd915a2a7b80c1fa74a18a63d53f99cd7" + date: "2020-06-15 06:44:35 +0000" + description: "bump regex from 1.3.5 to 1.3.9" + pr_number: 2818 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "151e7584ef8d1ab4eaf7f7a6a4ac843db48b8755" + date: "2020-06-15 18:34:39 +0000" + description: "update goauth to new futures" + pr_number: 2819 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 95 + deletions_count: 20 + }, { + sha: "60d1eea7b096876e995b8326128595fefb398882" + date: "2020-06-15 08:44:02 +0000" + description: "Note on wasm" + pr_number: 2804 + scopes: ["wasm transform"] + type: "docs" + breaking_change: false + author: "Ana Hobden" + files_count: 6 + insertions_count: 28 + deletions_count: 2 + }, { + sha: "4ff2772444f21f8b4071e40200e1598616cdbd99" + date: "2020-06-15 18:48:34 +0000" + description: "improve time validation in tests" + pr_number: 2803 + scopes: ["humio_logs sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 12 + deletions_count: 3 + }, { + sha: "11c29d5ea32108672e329de584bc669f5be8de09" + date: "2020-06-15 11:57:36 +0000" + description: "Adding list of string as filter option" + pr_number: 2745 + scopes: ["filter transform"] + type: "feat" + breaking_change: false + author: "Bill" + files_count: 6 + insertions_count: 286 + deletions_count: 76 + }, { + sha: "18717d642ebdbdb4ba22a23dd9b0256ad12a8703" + date: "2020-06-15 19:30:30 +0000" + description: "Bump rust-toolchain to 1.44.0" + pr_number: 2813 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 6 + insertions_count: 6 + deletions_count: 10 + }, { + sha: "fcf3868c0005a449080beb298c8526bf5587b232" + date: "2020-06-15 10:45:28 +0000" + description: "bump maxminddb from 0.13.0 to 0.14.0" + pr_number: 2791 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: 
"3e2ab7da5554a554d31f4f05526d0f562d5a309e" + date: "2020-06-15 10:47:30 +0000" + description: "bump tokio-codec from 0.1.1 to 0.1.2" + pr_number: 2783 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "bb1241a765a116df8c1582a447d5e168b790f56b" + date: "2020-06-15 21:19:21 +0000" + description: "Add missed option" + pr_number: 2662 + scopes: [] + type: "docs" + breaking_change: false + author: "Kirill Fomichev" + files_count: 7 + insertions_count: 80 + deletions_count: 18 + }, { + sha: "f9cb2186ed2254a9b87c2a9fa5f33c5fc6dabc94" + date: "2020-06-15 12:40:13 +0000" + description: "Add severity key" + pr_number: 2732 + scopes: ["gcp_stackdriver_logs sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 228 + deletions_count: 6 + }, { + sha: "1603fdc4dba0ba49a20b277bb03bfa4522bffed4" + date: "2020-06-16 20:21:02 +0000" + description: "Fix tcp+tls hang on tcp detect read" + pr_number: 2566 + scopes: ["socket sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 8 + insertions_count: 92 + deletions_count: 68 + }, { + sha: "6d15ec5a78f84641bba152cc924f0f7b098c899c" + date: "2020-06-16 13:58:06 +0000" + description: "RFC #2768 - 2020-06-12 - Batch and Buffer Rework" + pr_number: 2802 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 133 + deletions_count: 0 + }, { + sha: "267019ee140d4f4975f4de9706a8daef428b9780" + date: "2020-06-16 15:47:02 +0000" + description: "Fix typo integration tests" + pr_number: 2832 + scopes: ["kafka sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "6d4a8f4a70af95bffb6564175f7a0a43e1d524d9" + date: "2020-06-17 13:39:01 +0000" + description: "Allow disconnected components, and split `start` step" + pr_number: 2793 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 153 + deletions_count: 46 + }, { + sha: "b5de68d7f7b7299e6789f89f74ae20f41d3cd6ce" + date: "2020-06-17 12:35:20 +0000" + description: "More usable event RFC" + pr_number: 2692 + scopes: ["operations"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 222 + deletions_count: 0 + }, { + sha: "3ee64020dd9673cef0a8eab9605fe590b9948708" + date: "2020-06-17 12:43:58 +0000" + description: "Fix names of RFCs." 
+			pr_number: 2839
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 4
+			insertions_count: 2
+			deletions_count: 2
+		}, {
+			sha: "6819865a7550d32a2cbdc30de7b66c13b5a5cee3"
+			date: "2020-06-17 15:38:26 +0000"
+			description: "bump typetag from 0.1.4 to 0.1.5"
+			pr_number: 2835
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 4
+			deletions_count: 4
+		}, {
+			sha: "2ddf266397f8c67ccc3e77d8ab5ebe2849d4d62e"
+			date: "2020-06-17 16:36:21 +0000"
+			description: "bump assert_cmd from 0.11.1 to 1.0.1"
+			pr_number: 2829
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 3
+			insertions_count: 16
+			deletions_count: 18
+		}, {
+			sha: "4c3ba3e4bf09b259e446d2c13b50678552731f02"
+			date: "2020-06-17 17:22:53 +0000"
+			description: "Use flat JSON parser inserts, add regression test."
+			pr_number: 2823
+			scopes: ["json_parser transform"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 2
+			insertions_count: 34
+			deletions_count: 1
+		}, {
+			sha: "c0438da3c166d3718b8275d2a5858d8fb1767435"
+			date: "2020-06-17 17:23:36 +0000"
+			description: "bump criterion from 0.3.1 to 0.3.2"
+			pr_number: 2827
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 3
+			insertions_count: 22
+			deletions_count: 61
+		}, {
+			sha: "947dc0e777ab617119d9317fc3143f8808ead09d"
+			date: "2020-06-18 08:01:38 +0000"
+			description: "update rusoto "
+			pr_number: 2830
+			scopes: ["aws_cloudwatch_logs sink"]
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 32
+			insertions_count: 398
+			deletions_count: 1458
+		}, {
+			sha: "22ec017d7c6ce0727c35afbccc1b025484dd9c04"
+			date: "2020-06-18 13:34:12 +0000"
+			description: "Make encoding optional"
+			pr_number: 2796
+			scopes: ["loki sink"]
+			type: "chore"
+			breaking_change: false
+			author: "Kruno Tomola Fabro"
+			files_count: 2
+			insertions_count: 8
+			deletions_count: 8
+		}, {
+			sha: "49de96d1aedbc87cc23d98a72d39b6745d24c49c"
+			date: "2020-06-18 14:32:24 +0000"
+			description: "Remove custom DNS resolution"
+			pr_number: 2812
+			scopes: ["networking"]
+			type: "chore"
+			breaking_change: true
+			author: "Kruno Tomola Fabro"
+			files_count: 16
+			insertions_count: 89
+			deletions_count: 410
+		}, {
+			sha: "fa040ac0504c6bf5d50be592e2f8946bf88b4414"
+			date: "2020-06-18 15:43:07 +0000"
+			description: "Finish `hyper` upgrade"
+			pr_number: 2852
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "Kruno Tomola Fabro"
+			files_count: 28
+			insertions_count: 59
+			deletions_count: 83
+		}, {
+			sha: "e48ea7fab3da98bd7b8d997ba5eef09c4b769f2b"
+			date: "2020-06-18 15:45:17 +0000"
+			description: "Properly synchronize file updates in `watcher` test"
+			pr_number: 2853
+			scopes: ["tests"]
+			type: "chore"
+			breaking_change: false
+			author: "Kruno Tomola Fabro"
+			files_count: 1
+			insertions_count: 1
+			deletions_count: 1
+		}, {
+			sha: "1f3cb4cc0c755adf5a023a2cb6eeff277e1dc730"
+			date: "2020-06-18 18:22:42 +0000"
+			description: "warp update"
+			pr_number: 2837
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 9
+			insertions_count: 204
+			deletions_count: 144
+		}, {
+			sha: "a0589e9326ac3367961712fb3b8bf9927e2e428a"
+			date: "2020-06-18 09:41:49 +0000"
+			description: "Fix release verification."
+ pr_number: 2847 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "fb6f6e89d094f973b6f03b812198294638e0a0dc" + date: "2020-06-18 23:32:02 +0000" + description: "Use to_raw_value instead of to_string in JsonArrayBuffer" + pr_number: 2860 + scopes: ["sinks"] + type: "enhancement" + breaking_change: false + author: "Nazar Mishturak" + files_count: 1 + insertions_count: 3 + deletions_count: 4 + }, { + sha: "865c8ba5bf6618097d40035df5642f354a581f28" + date: "2020-06-18 20:15:03 +0000" + description: "address potential deadlock" + pr_number: 2838 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "ed1afd14d0222c2d7ba9df1487e7592a4f2adee4" + date: "2020-06-19 09:46:19 +0000" + description: "Add content options" + pr_number: 2848 + scopes: ["aws_s3 sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 4 + insertions_count: 102 + deletions_count: 4 + }, { + sha: "cdebd75c0c0d0648de8f96a4503d1df6128d07f8" + date: "2020-06-19 11:58:02 +0000" + description: "file-source bytes update" + pr_number: 2865 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 5 + insertions_count: 25 + deletions_count: 10 + }, { + sha: "4e5429b14fbdea5114755c447c8324fbaaac08ca" + date: "2020-06-19 14:03:06 +0000" + description: "Use `i` for unsigned ints on Influx v1" + pr_number: 2868 + scopes: ["influxdb_metrics sink"] + type: "fix" + breaking_change: false + author: "Marcus Griep" + files_count: 3 + insertions_count: 292 + deletions_count: 27 + }, { + sha: "e84ff7579511f1b7302348f237c6aa06710f33b4" + date: "2020-06-19 22:28:34 +0000" + description: "fix wrap path" + pr_number: 2873 + scopes: ["splunk_hec source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 9 + deletions_count: 23 + }, { + sha: "5e00483d8b84b114910bf9aba55238151c329f71" + date: "2020-06-20 00:31:03 +0000" + description: "Show client errors as warning logs" + pr_number: 2825 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Tristan Keen" + files_count: 1 + insertions_count: 57 + deletions_count: 6 + }, { + sha: "6963b5807a79dcef54423e7436d794e979cadb5a" + date: "2020-06-20 17:16:53 +0000" + description: "Shutdown write side only in test `tcp_stream_detects_disconnect`" + pr_number: 2857 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "a5a60c89a5f4138b71330ee40ac82cb15e2fa879" + date: "2020-06-21 23:23:47 +0000" + description: "Bump rust-toolchain to 1.44.1" + pr_number: 2874 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "257440183c2c38c7445a9a468ddd803d03d912df" + date: "2020-06-22 14:06:06 +0000" + description: "Fix command for integration tests" + pr_number: 2834 + scopes: ["kafka sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 4 + insertions_count: 5 + deletions_count: 7 + }, { + sha: "c293c492e97a7249822be4907f6bab84414dae7d" + date: "2020-06-23 07:07:50 +0000" + description: "remove outdated uri code" + pr_number: 2889 + scopes: [] + type: "chore" + breaking_change: false + 
author: "Kirill Fomichev" + files_count: 8 + insertions_count: 11 + deletions_count: 19 + }, { + sha: "a7486664de540a73c244324e5a093b388a7bb7ee" + date: "2020-06-24 11:08:43 +0000" + description: "Adding a condition to filter using IP CIDR" + pr_number: 2831 + scopes: ["filter transform"] + type: "feat" + breaking_change: false + author: "Bill" + files_count: 9 + insertions_count: 277 + deletions_count: 3 + }, { + sha: "7cd8c7fc43bd80ab8ef45069190947a3f8ca93e4" + date: "2020-06-24 18:12:03 +0000" + description: "remove contextual QA checklist" + pr_number: 2898 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 0 + deletions_count: 24 + }, { + sha: "be4ecfbfbea9420f542fd77a9118c8a3e84c1f7a" + date: "2020-06-24 17:37:24 +0000" + description: "Adjust buffering tests" + pr_number: 2862 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 49 + deletions_count: 40 + }, { + sha: "aaa4fc1a158ad3d714af9fa8efdf54ca7047c698" + date: "2020-06-24 17:37:31 +0000" + description: "Remove `trust-dns` dependecies" + pr_number: 2884 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 1 + deletions_count: 152 + }, { + sha: "da94162850b609b4e6d5d23edde9e0e2abc33624" + date: "2020-06-24 22:54:46 +0000" + description: "make HttpSink::build_request async" + pr_number: 2888 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 14 + insertions_count: 135 + deletions_count: 76 + }, { + sha: "5e5d806b31b8db02e918da84b72ba9ac93071b8e" + date: "2020-06-24 22:56:58 +0000" + description: "add assume_role to aws auth" + pr_number: 2895 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 5 + insertions_count: 48 + deletions_count: 12 + }, { + sha: "68d3d39fa64936dd484ebcab23d734a621cfa94c" + date: "2020-06-25 10:09:38 +0000" + description: "Adding spaces to condition docs" + pr_number: 2901 + scopes: [] + type: "docs" + breaking_change: false + author: "Bill" + files_count: 5 + insertions_count: 43 + deletions_count: 43 + }, { + sha: "6517265bf97364d9f7176f8cb7e60d060be7f209" + date: "2020-06-25 18:38:23 +0000" + description: "refresh aws creds when required" + pr_number: 2859 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 42 + deletions_count: 28 + }, { + sha: "dc70fc5eb8b9d50238536ed2aabf43cf690105f3" + date: "2020-06-25 20:28:31 +0000" + description: "reduce http:0.1 usage" + pr_number: 2905 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 33 + insertions_count: 110 + deletions_count: 175 + }, { + sha: "832f25b10e5254fd026c498e41c25a047017fefb" + date: "2020-06-27 11:03:29 +0000" + description: "Add SASL to kafka" + pr_number: 2897 + scopes: ["kafka source", "kafka sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 13 + insertions_count: 511 + deletions_count: 107 + }, { + sha: "3d11c34703bdaed580e2e42ff8a28024ea504d40" + date: "2020-06-29 17:34:11 +0000" + description: "remove native-tls" + pr_number: 2912 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "d2fe5279087ceb3dddd561c3b3d01959fad59006" + 
date: "2020-06-29 12:08:28 +0000" + description: "Batch buffer rework" + pr_number: 2866 + scopes: ["sinks"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 73 + insertions_count: 1532 + deletions_count: 680 + }, { + sha: "4ee8ea039c1f8b5fa2d08248748d994d45d17bbf" + date: "2020-06-29 11:50:06 +0000" + description: "Fixup sasl related knobs" + pr_number: 2915 + scopes: ["kafka kink"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 17 + deletions_count: 10 + }, { + sha: "abd8d806fb05b373cb0a027fe2384e083cb8ff74" + date: "2020-06-29 17:08:47 +0000" + description: "Allow remapping priority numbers to words" + pr_number: 2751 + scopes: ["journald source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 89 + deletions_count: 8 + }, { + sha: "cd7404091459802d709b5a393f7b4827ddc100fc" + date: "2020-06-30 09:13:45 +0000" + description: "Handle and enforce batch.max_bytes" + pr_number: 2916 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 13 + insertions_count: 270 + deletions_count: 78 + }, { + sha: "079c360ca6ab1b5a80ea551146cba7596513de81" + date: "2020-06-30 09:14:43 +0000" + description: "Remove two unneeded files" + pr_number: 2918 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 0 + deletions_count: 12 + }, { + sha: "25ee24cc4ae2dee25852b05b0dc3a208c554f066" + date: "2020-07-01 01:02:47 +0000" + description: "Add Fingerprinter::FirstLineChecksum" + pr_number: 2904 + scopes: ["file source"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 96 + deletions_count: 0 + }, { + sha: "02c30220d6a2a34425b33fc25490a3520d748282" + date: "2020-07-01 15:23:55 +0000" + description: "Architecture Revisit RFC" + pr_number: 2855 + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 504 + deletions_count: 0 + }, { + sha: "01b37785f38159388636ff9954a3675c1ae0d049" + date: "2020-07-05 14:25:02 +0000" + description: "Option to remove file after some time of reaching `eof`" + pr_number: 2908 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 126 + deletions_count: 6 + }, { + sha: "c34607e8f060ad7648c72983180b57737b14ed0b" + date: "2020-07-06 17:23:21 +0000" + description: "move reqwest to dev-dependencies " + pr_number: 2952 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 18 + insertions_count: 1081 + deletions_count: 1164 + }, { + sha: "f69c26356fe4d37b0bf4c4bae321d58970e0df31" + date: "2020-07-06 17:33:16 +0000" + description: "add compression option for aws sinks" + pr_number: 2953 + scopes: ["sinks"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 17 + insertions_count: 417 + deletions_count: 282 + }, { + sha: "35330c77049c928adfaad755743aca4c2bcd47f3" + date: "2020-07-06 18:41:22 +0000" + description: "disable sasl feature" + pr_number: 2930 + scopes: ["kafka sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 24 + deletions_count: 29 + }, { + sha: "aa9a38c515bbd4dd0b5beed509053ce18247548c" + date: "2020-07-07 19:52:53 +0000" + description: "Construct tcp healthcheck 
with TLS" + pr_number: 2958 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 47 + deletions_count: 28 + }, { + sha: "07f5268355018a740b7c8df59ec4a7c10fc64169" + date: "2020-07-07 11:25:42 +0000" + description: "Move the builder to centos 7" + pr_number: 2824 + scopes: ["platforms"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 4 + insertions_count: 38 + deletions_count: 10 + }, { + sha: "4202a963723b4e5ec548e9dfe518e725a28c7885" + date: "2020-07-07 13:26:01 +0000" + description: "move topology building into tokio context, take 2" + pr_number: 2964 + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 56 + insertions_count: 810 + deletions_count: 791 + }, { + sha: "d1d7d1a6776018c7b2fd138765a2c9eab20038df" + date: "2020-07-07 21:34:44 +0000" + description: "upgrade to bollard" + pr_number: 2951 + scopes: ["docker source"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 967 + deletions_count: 1099 + }, { + sha: "b5e0eb76c9e90234c5487817cf4eac3470472967" + date: "2020-07-07 20:41:25 +0000" + description: "Extract timestamp from event" + pr_number: 2956 + scopes: ["kafka source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 51 + deletions_count: 14 + }, { + sha: "349ad526c8e86b3b271ee7bb04134d15a2c1566f" + date: "2020-07-08 10:08:57 +0000" + description: "remove some allowed clippy rules" + pr_number: 2959 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 107 + insertions_count: 491 + deletions_count: 640 + }, { + sha: "16ed04d2f39f7268eda2eaade9368612eb6d06aa" + date: "2020-07-08 08:50:29 +0000" + description: "Move over homebrew/s3 release jobs" + pr_number: 2869 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 417 + deletions_count: 0 + }, { + sha: "59c4132a77556b62be5b1ee07dbdd74194c08c3f" + date: "2020-07-08 18:54:47 +0000" + description: "Add smoke tests to docker images" + pr_number: 2974 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "466bf49d3e580a48f082f12c098e4b2b6459d2a1" + date: "2020-07-08 11:03:57 +0000" + description: "bump nom from 5.1.0 to 5.1.2" + pr_number: 2967 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 15 + deletions_count: 30 + }, { + sha: "3c1ba1ba4eed48eb15cd45cea8635d3077f4da20" + date: "2020-07-08 11:57:26 +0000" + description: "Update README.md" + pr_number: 2976 + scopes: [] + type: "chore" + breaking_change: false + author: "Eugene Marinelli" + files_count: 3 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "ae53b889406942dd404578fc84d2675ea2e282ac" + date: "2020-07-08 12:45:30 +0000" + description: "bump scan_fmt from 0.2.4 to 0.2.5" + pr_number: 2849 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "fe6f9c29299f4dd45824ba9700aa3dc76de46ab7" + date: "2020-07-08 14:30:23 +0000" + description: "Configure dependabot to use correct issue names" + pr_number: 2821 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Ana Hobden" 
+			files_count: 4
+			insertions_count: 45
+			deletions_count: 4
+		}, {
+			sha: "bf3df05f132bebe9bb3e993318fb19a14d50273f"
+			date: "2020-07-08 14:39:26 +0000"
+			description: "Fixup dependabot config"
+			pr_number: 2980
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 1
+			insertions_count: 2
+			deletions_count: 2
+		}, {
+			sha: "448b64139b8a5440cc0c885f3e9cd9635a2d8be4"
+			date: "2020-07-08 15:01:31 +0000"
+			description: "bump built from 0.3.2 to 0.4.2"
+			pr_number: 2850
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 3
+			insertions_count: 22
+			deletions_count: 10
+		}, {
+			sha: "863923b359afeb61742f88284e844f395f52afa2"
+			date: "2020-07-08 15:34:05 +0000"
+			description: "Make environment use ubuntu 20.04"
+			pr_number: 2971
+			scopes: ["operations"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 15
+			insertions_count: 213
+			deletions_count: 418
+		}, {
+			sha: "3702885aa8092eb9b2a4c666800dcb7c2346c818"
+			date: "2020-07-08 15:42:35 +0000"
+			description: "bump @types/fs-extra from 8.1.0 to 8.1.1 in /website"
+			pr_number: 2986
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 3
+			deletions_count: 3
+		}, {
+			sha: "c8c8aaa399e134d88c9c4ae62b7a9ed8c750be6f"
+			date: "2020-07-08 16:07:52 +0000"
+			description: "bump yard from 0.9.24 to 0.9.25 in /scripts"
+			pr_number: 2981
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 1
+			deletions_count: 1
+		}, {
+			sha: "7aba11cf95660936d338068b77bcf422adaba78c"
+			date: "2020-07-08 16:11:03 +0000"
+			description: "bump @types/lodash from 4.14.150 to 4.14.157 in /website"
+			pr_number: 2988
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 3
+			deletions_count: 3
+		}, {
+			sha: "60eddc87fea0869023eb1b193e5d6b8f18dcaac0"
+			date: "2020-07-08 16:12:00 +0000"
+			description: "Fix dependency label to be longer again."
+ pr_number: 2990 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "25d0aa1855938b549e9cfa42c38c6a22f7951c60" + date: "2020-07-08 16:57:12 +0000" + description: "bump qs from 6.9.3 to 6.9.4 in /website" + pr_number: 2982 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "472d3b6181f1480ab49e70c5efa0522be7e7038b" + date: "2020-07-09 17:12:28 +0000" + description: "Add smoke test for cmark-gfm at builder" + pr_number: 2995 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "d78ffbfb59a14624621d42303bd5ce40f06c6f05" + date: "2020-07-09 17:32:13 +0000" + description: "Fix the cmark-gfm installation at builder image" + pr_number: 2996 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "1cd6e0697a51a22381a7b7eceda9c0e172b0504b" + date: "2020-07-09 18:14:27 +0000" + description: "Use perl rename at builder" + pr_number: 2999 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 4 + deletions_count: 1 + }, { + sha: "caa68d812bcd456ecd0de685e09d60f12463350d" + date: "2020-07-09 14:02:04 +0000" + description: "Add musl and glibc support to install.sh" + pr_number: 2969 + scopes: ["operations"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 17 + deletions_count: 2 + }, { + sha: "969b52de850196a4f4010d2a719a25d26d4aebc3" + date: "2020-07-10 16:48:29 +0000" + description: "remove outdated security advisories" + pr_number: 3009 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "e5d828374b53ba952a36991db6214b90b2d61424" + date: "2020-07-10 07:07:29 +0000" + description: "Make dependabot only cover Rust code" + pr_number: 3003 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 0 + deletions_count: 26 + }, { + sha: "8fec89e3fc64543e63167fff4e2f8208114cbf0f" + date: "2020-07-10 09:34:03 +0000" + description: "Fix filter_unit_works_correctly test" + pr_number: 3005 + scopes: ["journald source"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "37790157c61ea630635f88da81564937cfb00585" + date: "2020-07-10 11:08:41 +0000" + description: "Fixup release script formats" + pr_number: 3002 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "7c61530eb67cd33d45959e71115f613a32991e38" + date: "2020-07-10 11:09:52 +0000" + description: "Fixup release-s3" + pr_number: 3015 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9c0ecdbc5478b17503522f826ecb3dbd2bf68e10" + date: "2020-07-10 21:36:41 +0000" + description: "sync token request" + pr_number: 2993 + scopes: ["gcp provider"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + 
insertions_count: 68 + deletions_count: 69 + }, { + sha: "db85e2faf6b590b76eed72530ce9600fbbcc03b6" + date: "2020-07-11 00:03:22 +0000" + description: "update to 1.0.0-alpha-2" + pr_number: 2977 + scopes: ["pulsar sink"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 13 + insertions_count: 321 + deletions_count: 294 + }, { + sha: "0036ede7448f3b6f849d1f13fc832c24cc522890" + date: "2020-07-10 14:04:42 +0000" + description: "Remove dead envrc file" + pr_number: 3023 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "63792bb8ef63fc560ff436122763a74cab30706b" + date: "2020-07-10 14:05:14 +0000" + description: "Remove the dns-server options from docs" + pr_number: 3021 + scopes: [] + type: "docs" + breaking_change: false + author: "Ana Hobden" + files_count: 6 + insertions_count: 1 + deletions_count: 52 + }, { + sha: "36efd3970f6931221e5749539c93f218e9147dba" + date: "2020-07-10 16:24:08 +0000" + description: "Set a proper default for the batch buffer initialization" + pr_number: 3016 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 17 + deletions_count: 20 + }, { + sha: "bb497befcb38793573404e56fda3871ed0ece3c3" + date: "2020-07-10 18:28:49 +0000" + description: "add reduce transform" + pr_number: 2870 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 17 + insertions_count: 2496 + deletions_count: 3 + }, { + sha: "ccbb802c010c6109240a14be44b3c30dbae73564" + date: "2020-07-11 09:59:29 +0000" + description: "update prost" + pr_number: 3017 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 33 + deletions_count: 113 + }, { + sha: "f28b64b9f34f7da81597bb935359e1c4a3658431" + date: "2020-07-11 12:53:34 +0000" + description: "update crates in lock file after merges" + pr_number: 3030 + scopes: ["deps"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "a9a5d3980db07ec51a23648f95721790c0c6397e" + date: "2020-07-12 00:11:46 +0000" + description: "Accept `octet-counting` encoded messages" + pr_number: 2955 + scopes: ["syslog source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 8 + insertions_count: 226 + deletions_count: 36 + }, { + sha: "5c88a7c156bf4b0c96c5be329c1fe03e8f838aac" + date: "2020-07-11 16:21:53 +0000" + description: "Drop 3 more clippy exclusions" + pr_number: 3026 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "feb4e44f7d6888e967ed650e8fde824a5d35c166" + date: "2020-07-12 01:34:11 +0000" + description: "Delete `topology::config::watcher::tests::multi_file_update` " + pr_number: 3034 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 0 + deletions_count: 16 + }, { + sha: "e3dca13f68773ca690b75395a46aa4b4649b2d44" + date: "2020-07-12 01:35:00 +0000" + description: "Delete `topology::reload_tests::topology_reuse_old_port` " + pr_number: 3036 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 0 + deletions_count: 38 + }, { + sha: 
"20b2c504412c499503729bbf998f805c69de9b76" + date: "2020-07-12 01:36:52 +0000" + description: "Build `HttpClient` once" + pr_number: 3010 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 14 + insertions_count: 114 + deletions_count: 155 + }, { + sha: "2e15006ac26f3699fa10a4811d9da7ad9914d7dc" + date: "2020-07-12 18:20:19 +0000" + description: "remove tokio-codec crate" + pr_number: 3011 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 11 + insertions_count: 339 + deletions_count: 30 + }, { + sha: "3848bfd7f23144f2da92775def678c6c750626cd" + date: "2020-07-12 17:54:22 +0000" + description: "Delete `tests\\tcp::merge` test" + pr_number: 3042 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 0 + deletions_count: 64 + }, { + sha: "359adb8c5a2a3040717dd87a2c09e5fdc0185dc9" + date: "2020-07-12 17:56:58 +0000" + description: "Delete `test_udp_syslog` test" + pr_number: 3044 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 1 + deletions_count: 65 + }, { + sha: "9880fbebb38c5bd4bdcbf795b74800794292fe31" + date: "2020-07-12 17:58:30 +0000" + description: "Delete `test_max_size_resume`" + pr_number: 3040 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 2 + deletions_count: 63 + }, { + sha: "752fb6574503da0811d4033289fc45ab974926de" + date: "2020-07-12 21:33:27 +0000" + description: "bump num_cpus from 1.12.0 to 1.13.0" + pr_number: 2910 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "680266eef07fa3f76c96ca9ea055bb704457a19a" + date: "2020-07-13 06:29:10 +0000" + description: "bump serde_json from 1.0.55 to 1.0.56" + pr_number: 3051 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "23888d1a7aff7629bf601abcaedbc7009f55ad45" + date: "2020-07-13 06:37:34 +0000" + description: "bump bytesize from 1.0.0 to 1.0.1" + pr_number: 3050 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "5d8f535454d9d9e6a166d2466f5f45bd26e8b5e2" + date: "2020-07-13 17:36:14 +0000" + description: "bump to pulsar-rs 1.0.0" + pr_number: 3053 + scopes: ["pulsar sink"] + type: "chore" + breaking_change: false + author: "Alexandre DUVAL" + files_count: 3 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "cd4620e3a2010259f988769c8a97388a9666f2da" + date: "2020-07-13 19:16:50 +0000" + description: "reduce std::io::Cursor usage" + pr_number: 3038 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 9 + insertions_count: 32 + deletions_count: 40 + }, { + sha: "c1ca0295b3cda2fad9bef6b8212a220fc6feeffe" + date: "2020-07-13 10:58:46 +0000" + description: "Fixup integration test invocations" + pr_number: 3004 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 2 + deletions_count: 3 + }, { + sha: "e60d96d8d70e6d14118b4330eb5ccaa131afa896" + date: "2020-07-13 12:30:43 +0000" + description: "Fix broken `make generate`" + 
pr_number: 3058 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 485 + insertions_count: 566 + deletions_count: 999 + }, { + sha: "7aef527df7cdc3c2abb330a22911c94c62b1010c" + date: "2020-07-14 09:15:51 +0000" + description: "bump bollard from 0.7.0 to 0.7.1" + pr_number: 3048 + scopes: ["deps"] + type: "chore" + breaking_change: false + author: "dependabot-preview[bot]" + files_count: 2 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "dc5c6cc7f38564d8ccb4f0774c0e398428428973" + date: "2020-07-14 12:05:45 +0000" + description: "Cleanup `generate` command" + pr_number: 3046 + scopes: ["cli"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 6 + deletions_count: 3 + }, { + sha: "60a9ded146a2188980c8d31cdd374ebd59b6e6f1" + date: "2020-07-14 12:12:07 +0000" + description: "Build `aws` sinks clients only once" + pr_number: 3045 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 66 + deletions_count: 60 + }, { + sha: "dc564ed54508da96f62de57b33e6b2a994114d3d" + date: "2020-07-14 20:20:12 +0000" + description: "remove owning_ref" + pr_number: 3059 + scopes: ["kafka source"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 166 + deletions_count: 131 + }, { + sha: "ae20875fc7cd4b38dc7215c0b65cb22073490864" + date: "2020-07-14 22:45:40 +0000" + description: "update runtime_transform" + pr_number: 3065 + scopes: ["transforms"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 14 + insertions_count: 165 + deletions_count: 133 + }, { + sha: "b8fa14e21885af394586cf2ee7c736646a6ae7e2" + date: "2020-07-14 21:55:00 +0000" + description: "Add .rustfmt.toml to assist IDE formatters." 
+			pr_number: 3063
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "Harold Dost"
+			files_count: 1
+			insertions_count: 3
+			deletions_count: 0
+		}, {
+			sha: "df65bf9830eaffdcb8798865abb710ad67a8628a"
+			date: "2020-07-14 19:44:15 +0000"
+			description: "add SinkConfig::build_async"
+			pr_number: 3066
+			scopes: ["sinks"]
+			type: "chore"
+			breaking_change: false
+			author: "Luke Steensen"
+			files_count: 11
+			insertions_count: 132
+			deletions_count: 121
+		}, {
+			sha: "0faa07b42276dcdec3e077ff835610ebe865f21f"
+			date: "2020-07-15 16:07:33 +0000"
+			description: "adjust log event in coercer transform"
+			pr_number: 3072
+			scopes: ["docs"]
+			type: "fix"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 2
+			insertions_count: 3
+			deletions_count: 3
+		}, {
+			sha: "64d7dc8d215c25e61d3e4937ac748a8342a6afad"
+			date: "2020-07-15 07:34:20 +0000"
+			description: "Remove CircleCI"
+			pr_number: 3001
+			scopes: ["operations"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 1
+			insertions_count: 0
+			deletions_count: 967
+		}, {
+			sha: "30123d970e586a634cae729b37fe918bd5b13fd2"
+			date: "2020-07-15 07:35:03 +0000"
+			description: "bump flate2 from 1.0.13 to 1.0.16"
+			pr_number: 3049
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 8
+			deletions_count: 8
+		}, {
+			sha: "2e29c36d8225c6fa82419932e88f92372c755847"
+			date: "2020-07-15 21:59:31 +0000"
+			description: "fix endpoint strip for aws region"
+			pr_number: 3070
+			scopes: ["sinks"]
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 1
+			insertions_count: 19
+			deletions_count: 1
+		}, {
+			sha: "4866bded3c73d4340cb114efd73de3e872b5906f"
+			date: "2020-07-15 14:29:20 +0000"
+			description: "Add sync-install to GA"
+			pr_number: 3056
+			scopes: ["operations"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 2
+			insertions_count: 18
+			deletions_count: 3
+		}, {
+			sha: "efcf4b934aa83bd41d2bc54e8dc5bb3a64a78266"
+			date: "2020-07-15 16:01:02 +0000"
+			description: "bump tokio-uds from 0.2.5 to 0.2.7"
+			pr_number: 2946
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 2
+			deletions_count: 2
+		}, {
+			sha: "0b9e4cc40dc7e5c19a2451993da29c78e38f858a"
+			date: "2020-07-15 19:02:35 +0000"
+			description: "improve array handling"
+			pr_number: 3076
+			scopes: ["reduce transform"]
+			type: "fix"
+			breaking_change: false
+			author: "Luke Steensen"
+			files_count: 2
+			insertions_count: 132
+			deletions_count: 14
+		}, {
+			sha: "ff1884062e09956a7696a644f80c401e593d3e6f"
+			date: "2020-07-16 08:11:25 +0000"
+			description: "Fix warning message due .rustfmt.toml"
+			pr_number: 3075
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 1
+			insertions_count: 2
+			deletions_count: 1
+		}, {
+			sha: "6e097287b82ac48651649695593b3c7533873de1"
+			date: "2020-07-16 09:13:51 +0000"
+			description: "Drop the batch byte size limit"
+			pr_number: 3025
+			scopes: ["loki sink"]
+			type: "fix"
+			breaking_change: false
+			author: "Bruce Guenter"
+			files_count: 4
+			insertions_count: 5
+			deletions_count: 39
+		}, {
+			sha: "ca6df186b089b3535eecf609e1aaf54aed4c4df3"
+			date: "2020-07-16 20:41:25 +0000"
+			description: "use localstack instead minio"
+			pr_number: 3073
+			scopes: ["tests"]
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 2
+			insertions_count: 4
+			deletions_count: 20
+		}, {
+			sha: "3af6816cceafcd6af56d5f8e9286915b50a8025f"
+			date: "2020-07-16 11:18:40 +0000"
+			description: "bump inventory from 0.1.5 to 0.1.7"
+			pr_number: 2851
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot-preview[bot]"
+			files_count: 1
+			insertions_count: 4
+			deletions_count: 4
+		}, {
+			sha: "59871fd3f54bd4710d14fde4089088dcb2932d81"
+			date: "2020-07-16 11:51:19 +0000"
+			description: "Reenable sasl"
+			pr_number: 3081
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 8
+			insertions_count: 56
+			deletions_count: 36
+		}, {
+			sha: "773e5e5c85c5d5849e306783468e4cfb7fd82f74"
+			date: "2020-07-16 14:26:19 +0000"
+			description: "append instead of overwriting files"
+			pr_number: 3084
+			scopes: ["file sink"]
+			type: "fix"
+			breaking_change: false
+			author: "Luke Steensen"
+			files_count: 1
+			insertions_count: 43
+			deletions_count: 0
+		}, {
+			sha: "1de26d27ed36062e58c702e4c5af168175d4ca87"
+			date: "2020-07-16 16:26:39 +0000"
+			description: "stop logging every received event"
+			pr_number: 3085
+			scopes: ["elasticsearch sink"]
+			type: "fix"
+			breaking_change: false
+			author: "Luke Steensen"
+			files_count: 2
+			insertions_count: 7
+			deletions_count: 2
+		}, {
+			sha: "e31f20da83114449109c3a9f14a9ac097da28dfb"
+			date: "2020-07-19 21:41:16 +0000"
+			description: "Remove `tcp_graceful_shutdown` "
+			pr_number: 3103
+			scopes: ["tests"]
+			type: "chore"
+			breaking_change: false
+			author: "Kruno Tomola Fabro"
+			files_count: 1
+			insertions_count: 0
+			deletions_count: 111
+		}, {
+			sha: "f37f2e0ce3d94679bf88ba5826b43e705977bfdc"
+			date: "2020-07-19 21:42:21 +0000"
+			description: "Remove `s3_waits_for_full_batch_or_timeout_before_sending`"
+			pr_number: 3105
+			scopes: ["tests"]
+			type: "chore"
+			breaking_change: false
+			author: "Kruno Tomola Fabro"
+			files_count: 1
+			insertions_count: 0
+			deletions_count: 70
+		}, {
+			sha: "75b89d490c17f9985089237111e83e4e8c5f71df"
+			date: "2020-07-20 20:57:33 +0000"
+			description: "Bump rust-toolchain to 1.45.0"
+			pr_number: 3089
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "Kirill Fomichev"
+			files_count: 7
+			insertions_count: 18
+			deletions_count: 18
+		}, {
+			sha: "876b4104c2009b8e7be71a6c90c4c0f810a90c76"
+			date: "2020-07-20 14:03:22 +0000"
+			description: "Docker debian packages systemd"
+			pr_number: 3129
+			scopes: ["operations"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 1
+			insertions_count: 3
+			deletions_count: 3
+		}, {
+			sha: "ed9006f783445c1636fb50a56fae1fffec4ab1e3"
+			date: "2020-07-20 15:38:34 +0000"
+			description: "Add Rust 2018 edition tag to .rustfmt.toml"
+			pr_number: 3120
+			scopes: []
+			type: "chore"
+			breaking_change: false
+			author: "Bruce Guenter"
+			files_count: 1
+			insertions_count: 1
+			deletions_count: 0
+		}, {
+			sha: "1c91ed0883f6743f471095cdad2ec570f1e74bee"
+			date: "2020-07-20 15:22:41 +0000"
+			description: "Debian buster is stable, not sid"
+			pr_number: 3131
+			scopes: ["operations"]
+			type: "chore"
+			breaking_change: false
+			author: "Ana Hobden"
+			files_count: 1
+			insertions_count: 2
+			deletions_count: 2
+		}, {
+			sha: "129a861500f435f94e4b4153f49f1f821715ee46"
+			date: "2020-07-20 23:52:26 +0000"
+			description: "bump lodash from 4.17.15 to 4.17.19 in /website"
+			pr_number: 3098
+			scopes: ["deps"]
+			type: "chore"
+			breaking_change: false
+			author: "dependabot[bot]"
+			files_count: 1
+			insertions_count: 3
+			deletions_count: 3
+		}, {
+			sha: "5885c7d579ebd95e231589da38056db476d6040c"
+			date: "2020-07-21 12:53:47 +0000"
+
description: "Update benches" + pr_number: 3107 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 16 + deletions_count: 14 + }, { + sha: "bfb0fcaa89a118913547ad01c6c2a98fdec29cf7" + date: "2020-07-21 13:50:27 +0000" + description: "Remove tokio 0.1 from shutdown" + pr_number: 3106 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 18 + deletions_count: 21 + }, { + sha: "b6004be220b2071093a066bb16449f1a4e5814ba" + date: "2020-07-21 09:32:09 +0000" + description: "Update cidr-utils" + pr_number: 3139 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "3b8bf31a4f6cd5a47016bba934e16aabe04c18ff" + date: "2020-07-21 10:26:37 +0000" + description: "Pin back Rust 1.45 until after 0.10.0" + pr_number: 3142 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "675ad3be743cc754197e108d2da4427d636dd682" + date: "2020-07-21 11:25:33 +0000" + description: "Fixup clippy issues from 1.44 rollback" + pr_number: 3143 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "22fa53afbef21189d7eb8c7eea68864fc494fa93" + date: "2020-07-21 12:34:52 +0000" + description: "Prepare v0.10.0" + pr_number: 3022 + scopes: [] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 42 + insertions_count: 14414 + deletions_count: 140 + }, { + sha: "c6d0cad0fe058ad7a5a5603410d03ff2d4b321c5" + date: "2020-07-22 09:51:17 +0000" + description: "Fixup release-homebrew" + pr_number: 3158 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "680b7eeb5cfcbd6f7a338591270066560c5397ab" + date: "2020-07-22 09:35:22 +0000" + description: "Fixup version invocation to not have info" + pr_number: 3153 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "341183a2034493ad2f7236c07e1f35d546fca514" + date: "2020-07-22 09:35:03 +0000" + description: "Fixup release s3 job" + pr_number: 3155 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "19e62802df15dc21f6c4f2e3f94f1e5bbe9437e4" + date: "2020-07-22 11:13:37 +0000" + description: "Fixup release commit" + pr_number: 3163 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 11 + deletions_count: 1 + }] + } + "0.7.0": { + date: "2020-01-22" + codename: "The Great Omelet" + whats_next: [] + commits: [{ + sha: "fba1b3e0676f6abc1b1ed5178a4972cd62b7fb01" + date: "2019-12-12 11:24:50 +0000" + description: "Fix handling of locally-installed bundles" + pr_number: 1354 + scopes: [] + type: "docs" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "e46e4601c28862e211f518a592152146be9f35ad" + date: "2019-12-12 12:28:17 +0000" + description: "Rename Alex to Alexander" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + 
author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "cb42cd5b124fc3e5b3950eccd6e94d521d9d23e5" + date: "2019-12-12 12:33:46 +0000" + description: "Fix broken footer links" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "0be789eb87539a2ea7983ccc5f0c0a69a839b1a3" + date: "2019-12-12 12:34:11 +0000" + description: "Redirect legacy /mailing_list path" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "edad28226bfc9aec44e07edb0e6d8fdc73ab663b" + date: "2019-12-12 12:47:18 +0000" + description: "Fix release-github" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 0 + }, { + sha: "d11da73b6e83411b3c925575a045358c764c0824" + date: "2019-12-12 13:30:10 +0000" + description: "Fix release-github" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 7 + deletions_count: 3 + }, { + sha: "a2020d9ac1fc3bd0ba44a5c22f8a2ad0c6b00d5b" + date: "2019-12-12 14:03:10 +0000" + description: "Add output fields to `geoip` transform docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "bradybot" + files_count: 18 + insertions_count: 235 + deletions_count: 22 + }, { + sha: "15881bd03656de0abdeb875c0c452ed40a1d7f9b" + date: "2019-12-12 22:57:26 +0000" + description: "Fix `release-github` job" + pr_number: 1359 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 5 + deletions_count: 7 + }, { + sha: "70af6520c302ecb9c35a32f043803c7132d0d60e" + date: "2019-12-12 22:59:09 +0000" + description: "Fix typo" + pr_number: 1360 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "261a0ef36e32c79fe9f8622ac3e23258e530668c" + date: "2019-12-12 23:11:13 +0000" + description: "Document Docker build" + pr_number: 1358 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 186 + deletions_count: 9 + }, { + sha: "67d4680b6313d1fb6c67450441ce781aa46dca0c" + date: "2019-12-12 19:15:36 +0000" + description: "Fix blog responsive styling issues" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "bradybot" + files_count: 3 + insertions_count: 13 + deletions_count: 9 + }, { + sha: "1610637a3884dd56af0b217dd496b9a5542c9a6f" + date: "2019-12-12 19:49:13 +0000" + description: "Fix link typo in footer" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "bradybot" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "df7f478873a87433e5e547b3ced19cf28772c4e3" + date: "2019-12-13 04:44:40 +0000" + description: "Inital `kubernetes` source implementation " + pr_number: 893 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 11 + insertions_count: 1199 + deletions_count: 17 + }, { + sha: "c4d7544eab196b573418340445acbc88ac17acf3" + date: "2019-12-12 22:50:30 +0000" + description: "Bump version to 0.7.0" + pr_number: 
1364 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "cdd7383e8e4060e2ea63c079270cff337790aee5" + date: "2019-12-13 14:48:42 +0000" + description: "Fix config file location" + pr_number: 1367 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "9bf1f1cd89264db2ddfb6a7b4dbb33a30b4a437d" + date: "2019-12-13 13:38:22 +0000" + description: "Fix value types link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "46a792408a6e2f82e26c8adfa606e62918552f61" + date: "2019-12-13 09:01:25 +0000" + description: "make truncation utf8-aware" + pr_number: 1361 + scopes: ["regex_parser transform"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 13 + deletions_count: 1 + }, { + sha: "1496c1cf219ff3d1f2a84909d19e43afad4852c5" + date: "2019-12-13 15:48:28 +0000" + description: "Fix bool default values in spec" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 10 + insertions_count: 33 + deletions_count: 31 + }, { + sha: "5ab0e260a5f0dd8d8d46795b64683563d0e81c87" + date: "2019-12-13 15:34:00 +0000" + description: "Improve blog markup" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 8 + insertions_count: 59 + deletions_count: 32 + }, { + sha: "8859769a4b48311c2511c8b255ad44c44f4b17bb" + date: "2019-12-13 16:56:05 +0000" + description: "updated version in Cargo.lock" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "2de0b0677319a9e04fe80aa39ebfe7bd8de84053" + date: "2019-12-13 22:45:21 +0000" + description: "Remove columnar formats section in aws_s3 sink" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 0 + deletions_count: 11 + }, { + sha: "894c480cc14145cdf2fd3c8ec6180a751f3f64b5" + date: "2019-12-14 11:18:40 +0000" + description: "Fix release description" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 8 + insertions_count: 55 + deletions_count: 41 + }, { + sha: "3c39da88208c96ffeb92aadd9845e925e7bf4bf6" + date: "2019-12-14 11:20:21 +0000" + description: "Downcase release description" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "6fffabeaacea11be9c290b6b9dcd05c16daf4376" + date: "2019-12-15 11:20:32 +0000" + description: "Add opengraph image" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "7da81767048c938f829dc90956751c2dbeca4871" + date: "2019-12-15 11:29:33 +0000" + description: "Fix open-graph image name" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "7c7b2c078ba025df99d6f9d49e4bf3a02b11c22f" + date: "2019-12-15 21:30:36 +0000" + 
description: "Fix performance comparison link" + pr_number: 1369 + scopes: ["website"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 21 + insertions_count: 55 + deletions_count: 54 + }, { + sha: "a9d09d466ff9b76dbe573448df890c8ba7e68082" + date: "2019-12-15 14:23:01 +0000" + description: "Update readme with new release links" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 15 + insertions_count: 122 + deletions_count: 22 + }, { + sha: "371e4d6667dec15578267babb31780466af6d167" + date: "2019-12-15 14:30:25 +0000" + description: "Add new post to readme" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 42 + deletions_count: 4 + }, { + sha: "4ee339a0e8e828aa1fe51155b202f66e6f713bcd" + date: "2019-12-15 14:33:36 +0000" + description: "Center new post announcement" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 18 + deletions_count: 6 + }, { + sha: "7252b0901ef936ccf0eb085cbc24c919e3c17a8f" + date: "2019-12-16 15:55:44 +0000" + description: "Update concepts doc" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 50 + deletions_count: 24 + }, { + sha: "396e2eff21d4accb638aee77658459db73da6612" + date: "2019-12-16 20:35:50 +0000" + description: "Initial `concat` transform implementation" + pr_number: 1271 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "foosinn" + files_count: 12 + insertions_count: 598 + deletions_count: 1 + }, { + sha: "99698fc215a7fcd8304f14a1c49b9a867d6fad6e" + date: "2019-12-16 15:56:41 +0000" + description: "Add example output" + pr_number: null + scopes: ["concat transform"] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 90 + insertions_count: 605 + deletions_count: 764 + }, { + sha: "cb9fbdb33d6e52284efabb120a4fbe7d5bad5ac4" + date: "2019-12-16 15:57:09 +0000" + description: "Formalize output data structure" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 10 + deletions_count: 0 + }, { + sha: "0be3b0a4e35df961788579fa7d4f37480d7eda51" + date: "2019-12-16 16:00:59 +0000" + description: "Re-order guides" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 13 + deletions_count: 8 + }, { + sha: "aada71a3c36434e67fba97ab29a52ffe76a34e34" + date: "2019-12-16 16:28:45 +0000" + description: "Update links to end with a / to avoid redirects" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 88 + insertions_count: 558 + deletions_count: 565 + }, { + sha: "648e28c8ed7c8819f152c67325f7900eb7ab08a9" + date: "2019-12-16 16:36:49 +0000" + description: "Initial `aws_ec2_metadata` transform implementation" + pr_number: 1325 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 15 + insertions_count: 1333 + deletions_count: 10 + }, { + sha: "c03853ea1ed1e1bc438ea1acc6c516d182190fb0" + date: "2019-12-16 15:56:11 +0000" + description: "remove accidentally committed vector.toml" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + 
files_count: 1 + insertions_count: 0 + deletions_count: 17 + }, { + sha: "34ae27fd6600cc62f65786284ade21f3aa3f47c7" + date: "2019-12-16 19:08:19 +0000" + description: "Fix builds by updating website dependencies" + pr_number: 1379 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 10 + insertions_count: 287 + deletions_count: 477 + }, { + sha: "7626e8c1a50e11c2c78b10a015c541c1a817a144" + date: "2019-12-16 20:46:24 +0000" + description: "Fix broken links" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 58 + insertions_count: 52 + deletions_count: 77 + }, { + sha: "f6d97e46cc01ad558e2da2d56d73f1fe4627e04e" + date: "2019-12-16 20:50:35 +0000" + description: "Remove uncessary #! link in sidebar" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 5 + }, { + sha: "b78cd10f8627cb4cd51d8691e7f76de41c542a27" + date: "2019-12-16 23:47:18 +0000" + description: "Fix website redirect and meta descriptions" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 94 + insertions_count: 307 + deletions_count: 368 + }, { + sha: "0d1ca551b058f38c21ef44793dc252dd95a4f8a0" + date: "2019-12-16 23:52:30 +0000" + description: "Add more meta descriptions to pages that were missing it" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 8 + insertions_count: 7 + deletions_count: 11 + }, { + sha: "981b41cd1a324045aecdbc2f20b288a14f78599e" + date: "2019-12-17 12:30:10 +0000" + description: "Fix config path for MSI package" + pr_number: 1378 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "d92220bb361236cb3073ac1bdaab3037f39479be" + date: "2019-12-17 14:34:09 +0000" + description: "Update metric docs according to new data model" + pr_number: 1268 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexey Suslov" + files_count: 17 + insertions_count: 774 + deletions_count: 441 + }, { + sha: "9d2ab35c4ed0f4c5a233473c31342168f2fd6561" + date: "2019-12-17 12:38:49 +0000" + description: "Update data model pages" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 47 + deletions_count: 24 + }, { + sha: "671dfe06db04846d1858d166b6f3c802a4fed395" + date: "2019-12-17 17:23:18 +0000" + description: "Update statsd docs" + pr_number: 1381 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexey Suslov" + files_count: 2 + insertions_count: 12 + deletions_count: 8 + }, { + sha: "b454e32da034af8d44005dde3028d0e51c2b6259" + date: "2019-12-17 10:42:56 +0000" + description: "Remove unneeded remove clones" + pr_number: 1375 + scopes: [] + type: "chore" + breaking_change: false + author: "Evan Cameron" + files_count: 21 + insertions_count: 75 + deletions_count: 80 + }, { + sha: "3e741aeaf6dec363ca2d71679d92685b57cfb0d4" + date: "2019-12-17 17:17:21 +0000" + description: "Edit guarantees page" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 58 + deletions_count: 32 + }, { + sha: "46e5a1bbaa56ddc7983320dc9960b2fd8703c2f6" + date: "2019-12-17 12:40:51 +0000" + description: "Fix 
spelling mistake" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "4c2c85363197ccb5f95d59bb7adce23a14c7a2ab" + date: "2019-12-13 11:34:33 +0000" + description: "Add custom DNS option" + pr_number: 1362 + scopes: ["networking"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 8 + insertions_count: 157 + deletions_count: 123 + }, { + sha: "f9d4d0665ea2c5c2d57472f722522270354c50f2" + date: "2019-12-17 14:31:37 +0000" + description: "Add keywords script" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 99 + deletions_count: 0 + }, { + sha: "6546d5b7dc80493ad6017ab250efa06abccd0dc8" + date: "2019-12-17 14:44:46 +0000" + description: "Fix option linking format" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 39 + insertions_count: 215 + deletions_count: 180 + }, { + sha: "bc50aa32b6dadcb28df98fee2220636754da3629" + date: "2019-12-17 14:53:00 +0000" + description: "Reorganize metrics data model" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 245 + deletions_count: 129 + }, { + sha: "a8b6f2deb64153283a3bd584e92244298b85a7a1" + date: "2019-12-17 20:32:39 +0000" + description: "Fix more unecessary redirect links" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 14 + insertions_count: 42 + deletions_count: 42 + }, { + sha: "71972ada1c815169cefbf25ecc1d8b626bfba4f1" + date: "2019-12-17 20:37:48 +0000" + description: "Update Vector announcement post to use h2s" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "095ef6605da1f2b31a51706903b5a553e4a64f57" + date: "2019-12-17 20:45:23 +0000" + description: "Drop use of docs.vector.dev" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 13 + insertions_count: 23 + deletions_count: 28 + }, { + sha: "05efdd1e63f9adb5dc8e0ba6bba7c946ca8921d5" + date: "2019-12-18 15:00:40 +0000" + description: "Fix configuration example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 6 + insertions_count: 85 + deletions_count: 41 + }, { + sha: "14a76afee831fabac0928f5034d2af214b7128fa" + date: "2019-12-18 19:35:59 +0000" + description: "Handle interruptions on Windows" + pr_number: 1219 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 9 + deletions_count: 6 + }, { + sha: "bdd9139a6b84b669d3b31b54ed766edd9317bc88" + date: "2019-12-18 11:21:42 +0000" + description: "Fix more links that redirect" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "3b333f073426ec0e975f10b37b52ef0b3f4eeae4" + date: "2019-12-18 13:10:56 +0000" + description: "Link link and metric tags to their respective data model pages" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 6 + deletions_count: 6 + }, { + sha: 
"f499ddcde59f76f5ad6bea4c6ec04f0bbc5292c7" + date: "2019-12-18 13:35:39 +0000" + description: "Initial `gcp_pubsub` sink implementation" + pr_number: 1308 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 18 + insertions_count: 1542 + deletions_count: 11 + }, { + sha: "95148c6ba0059c7201c793332d17d429ea6020e0" + date: "2019-12-18 20:00:41 +0000" + description: "Make the pass_list field optional" + pr_number: 1384 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "ba8737d6c78e9b0bd05a5ac4b335531fcef946dc" + date: "2019-12-18 23:49:17 +0000" + description: "Add aliases for latest major and minor versions" + pr_number: 1386 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 14 + insertions_count: 54 + deletions_count: 41 + }, { + sha: "f3a98c49bab6c0e1ce897e2b2d9fe4603505953e" + date: "2019-12-18 20:02:29 +0000" + description: "Improve page titles" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 110 + insertions_count: 349 + deletions_count: 203 + }, { + sha: "2807fe84df547481685c9b3043b381d6d86708a7" + date: "2019-12-18 20:06:42 +0000" + description: "Fix GCP pubsub title" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 7 + deletions_count: 6 + }, { + sha: "179c80dbacc361be86d8057cfdcef65e8448a5e5" + date: "2019-12-18 20:13:07 +0000" + description: "Add Google service provider to new gcp_pubsub sink" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 12 + insertions_count: 10 + deletions_count: 491 + }, { + sha: "d31a1c1d6e39f2636759b8d1ac5bd9bdf59a47d7" + date: "2019-12-18 22:05:05 +0000" + description: "Stop testing with ext host" + pr_number: null + scopes: ["splunk_hec sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 9 + deletions_count: 1 + }, { + sha: "9500d7761f00b2feac0a209bf023711631047d99" + date: "2019-12-19 16:06:34 +0000" + description: "Use Bundler 2.0.x for the checker and releaser images" + pr_number: 1392 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "3dcba4ff06a7361357d4f628464312055886524d" + date: "2019-12-19 12:25:38 +0000" + description: "Upgrade to rustc 1.40.0" + pr_number: 1395 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "c19fa9fa4b9a0811172e7aec459f2d7d2c5b96f4" + date: "2019-12-19 15:23:01 +0000" + description: "Add custom dns for `vector` and `tcp` sinks" + pr_number: 1400 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 38 + deletions_count: 10 + }, { + sha: "8cd7f2a1da1be9113d3c6d9588b1d77428563866" + date: "2019-12-19 17:03:21 +0000" + description: "Fix https rusoto connections" + pr_number: 1396 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "828b50c8fb2eb0b4259ef38a306bbe4c6d6071b9" + date: "2019-12-19 10:58:11 +0000" + description: "Update components meta tags 
to be more descriptive;" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "dda55bafaffdbbe282920ae68c02c9340bf82cab" + date: "2019-12-19 16:16:47 +0000" + description: "Add \"How It works\" to `gcp_pubsub` docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 85 + deletions_count: 11 + }, { + sha: "518ba0ee5c6b944c2056748e9aff29e0b5b05423" + date: "2019-12-19 16:59:54 +0000" + description: "Initial `aws_kinesis_firehose` sink" + pr_number: 1388 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 17 + insertions_count: 1280 + deletions_count: 10 + }, { + sha: "b76ae43e1a911435d3acca210624530fbbd60066" + date: "2019-12-20 11:52:06 +0000" + description: "Add getting started button to about page" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 12 + deletions_count: 6 + }, { + sha: "34fadad61caad363adb4090aa22a89415db235f4" + date: "2019-12-20 16:56:08 +0000" + description: "Fix field names in regex example" + pr_number: 1405 + scopes: [] + type: "docs" + breaking_change: false + author: "Matthias Endler" + files_count: 2 + insertions_count: 5 + deletions_count: 4 + }, { + sha: "2019265ac4dd64ae18d782dbf7372123358174c7" + date: "2019-12-20 12:04:04 +0000" + description: "Update healtchecks to use resolver" + pr_number: 1403 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 80 + deletions_count: 48 + }, { + sha: "9b6ebb92bc3e1e095ffdda222c13e42859e8a2bb" + date: "2019-12-20 17:30:35 +0000" + description: "Update keywords" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 48 + deletions_count: 47 + }, { + sha: "2b0ff8f3099bced53b45fa097bfbf580600888e9" + date: "2019-12-21 13:32:33 +0000" + description: "Fix `splunk_channel` option description, closes #1410" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "51189a85a368fbb15a7c8d7d134939a917d66087" + date: "2019-12-21 15:03:27 +0000" + description: "Vertically center hero on homepage" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 8 + deletions_count: 3 + }, { + sha: "b15dc67401f9af6fa248a8d94ae6a6b581f77803" + date: "2019-12-21 15:27:41 +0000" + description: "Further improve page descriptions" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 36 + insertions_count: 54 + deletions_count: 96 + }, { + sha: "10b38fb7f061d554eeba0181b2681a9b2123dfee" + date: "2019-12-22 20:59:59 +0000" + description: "Fix `rustup` for `x86_64-pc-windows-msvc` in CI" + pr_number: 1414 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 13 + deletions_count: 14 + }, { + sha: "97b47a84a41cd015fa9c61f6d07d7af3913a6e07" + date: "2019-12-22 12:59:17 +0000" + description: "Add netlify.toml for redirects" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + 
files_count: 7 + insertions_count: 27 + deletions_count: 6 + }, { + sha: "2f07be6f0052a9a8a284e334ab15448fd7449d57" + date: "2019-12-22 13:00:28 +0000" + description: "Fix duplicate description keys" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 0 + deletions_count: 4 + }, { + sha: "d563b47f577c0208f1998a5667ceed791afc7add" + date: "2019-12-22 13:06:18 +0000" + description: "/mailing_list redirect rule should end with a slash" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "d7ae1828ef012ae2069df2e4842a6addff42532b" + date: "2019-12-23 19:28:31 +0000" + description: "Add scripts/signoff-git-hook.sh" + pr_number: 1420 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 28 + deletions_count: 0 + }, { + sha: "48e6f4c9bf988fd942c6c271133b959382ef3021" + date: "2019-12-23 10:50:12 +0000" + description: "Add note about Syslog3164 not being supported" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 26 + deletions_count: 12 + }, { + sha: "81829da1d1828d2ee4a223ebba3b4a36119aae19" + date: "2019-12-23 12:49:45 +0000" + description: "Add contact and press sections" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 21 + insertions_count: 460 + deletions_count: 38 + }, { + sha: "207aabf4f29a75d99d8b5e82867fd6b63c4d5424" + date: "2019-12-23 13:00:19 +0000" + description: "Add Vector icon" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 29 + deletions_count: 0 + }, { + sha: "48e6d16012cc2f01542d39f9d9fcece8bffe211c" + date: "2019-12-23 13:03:27 +0000" + description: "Fix typo" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "fae036ebcb4d2b586bf2976e713f4d7391f0f705" + date: "2019-12-23 19:00:36 +0000" + description: "Fix mailinglist form styling" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 7 + deletions_count: 1 + }, { + sha: "09d815ddd0f88f6c18672ced8b28545f97f708bb" + date: "2019-12-24 10:42:21 +0000" + description: "Send min for distributions" + pr_number: 1422 + scopes: ["datadog_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 8 + deletions_count: 6 + }, { + sha: "49cf60eee59521348cc3438967c47f499f33fac6" + date: "2019-12-24 19:02:06 +0000" + description: "Accept Into<...> values at LogEvent::insert" + pr_number: 1419 + scopes: ["log data model"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 37 + insertions_count: 242 + deletions_count: 410 + }, { + sha: "e4e83ee5c5ecc82aa4d180a6a7f376670ee482be" + date: "2019-12-25 10:57:42 +0000" + description: "Update to docusaurus 2.0.0-alpha.40" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 13 + insertions_count: 641 + deletions_count: 508 + }, { + sha: "640503df5439b3dc9d5ffa17fa36d7c3b83200b2" + date: "2019-12-26 22:37:24 +0000" + 
description: "Remove duplicated line from the installation docs " + pr_number: 1438 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 2 + deletions_count: 15 + }, { + sha: "1db14e77a17a534a991708b942eb1e3c4b067331" + date: "2019-12-26 15:14:01 +0000" + description: "Add #vectordev hashtag" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 3 + deletions_count: 4 + }, { + sha: "62b4e91e6bca6b33019b5c6143f0e594e1da9db9" + date: "2019-12-27 16:13:21 +0000" + description: "Check for EOLs and trailing spaces in CI" + pr_number: 1425 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 69 + insertions_count: 617 + deletions_count: 545 + }, { + sha: "9988918f4ea20c2edaee4d6afcd2c73d41c8d358" + date: "2019-12-27 16:34:13 +0000" + description: "Add .editorconfig" + pr_number: 1421 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 14 + deletions_count: 0 + }, { + sha: "2a2c171ebb974757037cc3415e8002a275c96d7e" + date: "2019-12-27 20:43:38 +0000" + description: "Ensure that all files are checked by `check-style.sh`" + pr_number: 1441 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 266 + insertions_count: 486 + deletions_count: 480 + }, { + sha: "3fe52dd153d453a7c1d3f488157c73de4850a67d" + date: "2019-12-28 10:59:44 +0000" + description: "Fix scrolling issue when mobile slide out nav closes" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 27 + deletions_count: 2 + }, { + sha: "a4e47e2dd090e4121b6051d7f372821a6aab4b3d" + date: "2019-12-28 12:26:49 +0000" + description: "Fix broken release notes links, closes #1452" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "265f5bdd62ddf5491626aa7118f81cc36e47fa82" + date: "2019-12-28 13:23:27 +0000" + description: "Fix badge spacing on changelog" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "ba50491c5fb020c50326ee9ba6a6e9b44b900016" + date: "2019-12-28 13:44:18 +0000" + description: "Allow containers to be flush" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "45e5743ee187a0c9933decea2c2937698a5b41b2" + date: "2019-12-29 12:17:50 +0000" + description: "Fix `make check-fmt`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8a7ffc59f58242a6a2ecbd41aa6a62961da99752" + date: "2019-12-31 12:06:29 +0000" + description: "Don't ack with zero items" + pr_number: 1465 + scopes: ["buffers"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 29 + deletions_count: 7 + }, { + sha: "4367ccd73b1151dfe92730c1a016334a7b9fc6b7" + date: "2019-12-31 13:02:47 +0000" + description: "Update Vector description" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: 
"binarylogic" + files_count: 12 + insertions_count: 94 + deletions_count: 103 + }, { + sha: "5a944a5dd0b64592d17383949dd776e8cd9fb8e6" + date: "2019-12-31 13:03:54 +0000" + description: "Call out event" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "47d6d740d38b735c63c496ac1932ceb0da6f4129" + date: "2020-01-03 00:19:22 +0000" + description: "Ensure all wildcard examples are shown" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 64 + insertions_count: 255 + deletions_count: 178 + }, { + sha: "110175a740531bf24324d431b0b71adf18c01948" + date: "2020-01-03 14:40:01 +0000" + description: "Fix links in `CONTRIBUTING.md`" + pr_number: 1475 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "4700b8fac37586f6b6649b88d3eb074b8530690c" + date: "2020-01-03 21:05:45 +0000" + description: "Parse additional metadata" + pr_number: 1249 + scopes: ["kubernetes source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 234 + deletions_count: 107 + }, { + sha: "daa765f407766b54f1469bb5d6dc1c7c9c272ea0" + date: "2020-01-03 19:42:26 +0000" + description: "Move existing 'tcp' sink into 'socket' sink." + pr_number: 1404 + scopes: ["new sink"] + type: "feat" + breaking_change: true + author: "Spencer T Brody" + files_count: 21 + insertions_count: 915 + deletions_count: 263 + }, { + sha: "b10d2c56c9b095795af86a5003a8cb0e0e8d1874" + date: "2020-01-03 19:45:55 +0000" + description: "Merge existing 'tcp' and 'udp' sources into a single 'socket' source" + pr_number: 1485 + scopes: ["new source"] + type: "feat" + breaking_change: true + author: "Spencer T Brody" + files_count: 27 + insertions_count: 1013 + deletions_count: 731 + }, { + sha: "c70b5c426fa5d2155f96284216cc679462fd31d7" + date: "2020-01-04 15:41:24 +0000" + description: "Provide timestamp" + pr_number: 1458 + scopes: ["splunk_hec source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 32 + deletions_count: 14 + }, { + sha: "736918231c75ed25c385ad44d3de2a2f8da9953e" + date: "2020-01-04 16:55:08 +0000" + description: "Add retry logic to `kubernetes source` tests" + pr_number: 1413 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 83 + deletions_count: 53 + }, { + sha: "b4dee2a70788de256762f02e2392051120f19278" + date: "2020-01-04 12:51:28 +0000" + description: "Re-enable buffering reclaim test" + pr_number: 1474 + scopes: ["buffers"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 18 + deletions_count: 13 + }, { + sha: "25406f63c259d0db5339dc7a24e480bc971a46fc" + date: "2020-01-04 12:55:15 +0000" + description: "Cleanup `tcp` and `udp` references in favor of the new `socket` source and sink" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 13 + insertions_count: 27 + deletions_count: 1095 + }, { + sha: "c1b86db43bda99895d402d1549a977f959e770ff" + date: "2020-01-04 13:02:55 +0000" + description: "Redirect TCP/UDP components to the new Socket components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + 
files_count: 2 + insertions_count: 18 + deletions_count: 3 + }, { + sha: "9657ec9fd25f7a1517bbf5e8ba26351f66b7f897" + date: "2020-01-06 21:07:16 +0000" + description: "Backoff reads to inactive files" + pr_number: 1476 + scopes: ["file source"] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 41 + deletions_count: 6 + }, { + sha: "c4acea3f263dded915a04ba71827230ea463a9cd" + date: "2020-01-07 14:49:03 +0000" + description: "Initial `prometheus` source implementation" + pr_number: 1264 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Alexey Suslov" + files_count: 32 + insertions_count: 2363 + deletions_count: 409 + }, { + sha: "ac34024ea3f977be880536d8e612acd34db95abb" + date: "2020-01-08 10:02:34 +0000" + description: "Update benches and minor cleanup" + pr_number: 1487 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 7 + insertions_count: 77 + deletions_count: 23 + }, { + sha: "0a11a36f41143f0c7f39096dc7ad19698d70231d" + date: "2020-01-09 11:01:16 +0000" + description: "Add compiling docs for Windows" + pr_number: 1486 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 116 + deletions_count: 0 + }, { + sha: "b0819fdb10cb533482d3f6750e12733db1b7e0ec" + date: "2020-01-09 14:31:55 +0000" + description: "Verify building of the Nix package" + pr_number: 1432 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 6 + insertions_count: 100 + deletions_count: 1 + }, { + sha: "a0f5e54e041c7df6df29cb79f7d9e5d282fb1c1f" + date: "2020-01-09 12:19:24 +0000" + description: "Fix newline style" + pr_number: null + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "76686d512e98e0228c65426bffe8f18c1a33ba8f" + date: "2020-01-09 19:33:47 +0000" + description: "Document `endpoint` parameter for `aws_s3` sink" + pr_number: 1497 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 55 + deletions_count: 10 + }, { + sha: "5cfcba654a931b805dc043c327208502e368b535" + date: "2020-01-10 19:10:56 +0000" + description: "Add `node_modules` to `.gitignore`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 1 + deletions_count: 10 + }, { + sha: "ed0d50ff853946b6efaeaaf4c0a70b36ecaaf8d1" + date: "2020-01-10 19:09:42 +0000" + description: "Don't list unused architectures for Raspbian" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "606c08b552e3a6ea3ec6ab81056770cd81039aca" + date: "2020-01-10 15:41:21 +0000" + description: "Ignore recliam test" + pr_number: 1508 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "767f25133cdb024af24bb50c1699b5a05f520c4e" + date: "2020-01-10 18:02:35 +0000" + description: "Update messaging to follow messaging framework" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 6 + insertions_count: 29 + deletions_count: 39 + }, { + sha: "20d903396f49ceaf1f60b07f06f941a42080ad8f" + date: 
"2020-01-10 18:05:01 +0000" + description: "Simplify tag line" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "7bd10316457a8d03985ac8eb112934ca632b995f" + date: "2020-01-10 18:13:40 +0000" + description: "Less words in the tag line" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1314477e296d1648f64f6d774f7399da879d9c46" + date: "2020-01-11 15:27:07 +0000" + description: "Add additional container filters" + pr_number: 1324 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 217 + deletions_count: 40 + }, { + sha: "04ebc367d7e277a411a402265b18b4d010a8863f" + date: "2020-01-11 16:52:21 +0000" + description: "Merge fix" + pr_number: 1512 + scopes: ["docker source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "657d704011e6af529bdec7fb13358784d81e7326" + date: "2020-01-11 22:53:02 +0000" + description: "Update metric.md: fix typo" + pr_number: 1511 + scopes: [] + type: "docs" + breaking_change: false + author: "Evgenii Terechkov" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "2e1afa9ccd2f1fac0db2833642e0e7145c27e89f" + date: "2020-01-12 12:12:49 +0000" + description: "Use comma delim server list instead of an array" + pr_number: 1502 + scopes: ["kafka sink"] + type: "chore" + breaking_change: true + author: "Lucio Franco" + files_count: 4 + insertions_count: 16 + deletions_count: 19 + }, { + sha: "b3c39b55b338ed58b38aab76b0c2318f8bd9f1e5" + date: "2020-01-12 12:14:53 +0000" + description: "Rename `buffer.num_items` to `buffer.max_events`" + pr_number: 1495 + scopes: ["config"] + type: "chore" + breaking_change: true + author: "Lucio Franco" + files_count: 17 + insertions_count: 318 + deletions_count: 318 + }, { + sha: "0d83d412c924b1f1b1766fb11fd3499443644242" + date: "2020-01-12 12:55:53 +0000" + description: "Rename `basic_auth` to `auth` and unflatten" + pr_number: 1494 + scopes: ["config"] + type: "chore" + breaking_change: true + author: "Lucio Franco" + files_count: 28 + insertions_count: 417 + deletions_count: 213 + }, { + sha: "9b2d7a1d2afd9e6a77bc08aeca37d7cc36493ef8" + date: "2020-01-12 13:15:43 +0000" + description: "Fix metrics data model typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "c517a59a8d37ad8bfa2b77783a522bd227250441" + date: "2020-01-12 22:25:21 +0000" + description: "Use shiplift's since for filtering logs" + pr_number: 1513 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "6345f5fc8ee65a95685daa45ac8f296359130c11" + date: "2020-01-12 14:31:30 +0000" + description: "Nest `batch_*` and `request_*` sink config options" + pr_number: 1493 + scopes: ["config"] + type: "chore" + breaking_change: true + author: "Lucio Franco" + files_count: 132 + insertions_count: 4106 + deletions_count: 3319 + }, { + sha: "203f3bf103c2d86bf38f96ab445b3ec5f0505f2d" + date: "2020-01-12 14:48:51 +0000" + description: "Update homepage language" + pr_number: null + scopes: 
["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 22 + deletions_count: 19 + }, { + sha: "d7f736cf0745e5c6d1f791c001d2ff8e3c9b4c65" + date: "2020-01-12 14:50:59 +0000" + description: "Fix generate failure for metrics typo" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "3617aefe9f213aa8bb1fb22223043e8c7175ec10" + date: "2020-01-13 17:05:30 +0000" + description: "Run `test-stable` tests using `x86_64-unknown-linux-musl` target" + pr_number: 1505 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 21 + deletions_count: 54 + }, { + sha: "d9e9e09bbcf6a611bd872ed51e6d09cde54b56d5" + date: "2020-01-13 22:46:00 +0000" + description: "Fix tests for NixOS" + pr_number: 1522 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "9b104ff066e5b7e1f5766239b82978adffba60cd" + date: "2020-01-13 18:08:26 +0000" + description: "Clairfy `start_at_beginning`" + pr_number: 1523 + scopes: ["file source"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 7 + deletions_count: 5 + }, { + sha: "efec952131366a10b76778b7343958db0122d6dc" + date: "2020-01-13 18:50:17 +0000" + description: "Use fibonacci instead of fixed backoff" + pr_number: 1006 + scopes: ["networking"] + type: "enhancement" + breaking_change: true + author: "Luke Steensen" + files_count: 27 + insertions_count: 573 + deletions_count: 104 + }, { + sha: "35dcdfed8acad2e92c08f79dfae196d91cacdaf2" + date: "2020-01-14 12:50:43 +0000" + description: "Set `type` and `inputs` as required fields" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 51 + insertions_count: 690 + deletions_count: 1421 + }, { + sha: "560644d4f27c4b8bb245eb0fc9f0b4a88a24fd48" + date: "2020-01-14 20:32:14 +0000" + description: "Rename `line` field to `message`" + pr_number: 1457 + scopes: ["splunk_hec source"] + type: "enhancement" + breaking_change: true + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 33 + deletions_count: 5 + }, { + sha: "f11fedd726b4da2f20e834e24d33e084fd00e7e5" + date: "2020-01-15 16:04:11 +0000" + description: "Add unix domain socket support to the 'socket` source" + pr_number: 1489 + scopes: ["socket source"] + type: "feat" + breaking_change: false + author: "Spencer T Brody" + files_count: 8 + insertions_count: 335 + deletions_count: 101 + }, { + sha: "e11ba357543b67841b3c38aab214aa29f7d34f49" + date: "2020-01-16 13:38:07 +0000" + description: "Deprecate `LogEvent::into_value` in favor of `LogEvent::remove`" + pr_number: 1528 + scopes: ["log data model"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 6 + }, { + sha: "fe0e43a60b0ccb4dc7980b8b1edfa5e7b074b189" + date: "2020-01-16 13:38:21 +0000" + description: "Use `.iter()` instead of `.into_iter()` for arrays" + pr_number: 1529 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "fff92728c9490824ff9d0ae76669adc901bb5499" + date: "2020-01-17 14:32:43 +0000" + description: "Remove distinction between explicit and 
implicit fields" + pr_number: 1530 + scopes: ["log data model"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 49 + insertions_count: 426 + deletions_count: 574 + }, { + sha: "41be43d7ce8ac4d665525bd15bda044d4822c779" + date: "2020-01-17 14:59:29 +0000" + description: "Switch from libsystemd to piping journalctl" + pr_number: 1526 + scopes: ["journald source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 200 + deletions_count: 453 + }, { + sha: "b2cdcdabd4535e4d3c40fd656a1fcbcc37ef8804" + date: "2020-01-19 14:09:14 +0000" + description: "Homepage language cleanup" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 9 + insertions_count: 103 + deletions_count: 258 + }, { + sha: "34735be2dd5f51d94cccca5b7d57219f24e6de11" + date: "2020-01-19 14:11:53 +0000" + description: "Customize toggle button" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 189 + deletions_count: 0 + }, { + sha: "6758f6a7ff7809c2f2c50137e2db9a005355ac26" + date: "2020-01-19 15:37:06 +0000" + description: "Add free language" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "b29e72cebcbb4314e4c82e03b88a9f7d2c007d25" + date: "2020-01-19 17:34:24 +0000" + description: "Add modern language" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 3 + deletions_count: 4 + }, { + sha: "d0aba1b3f4166c490785d50f12674a19277a471a" + date: "2020-01-20 14:36:34 +0000" + description: "Add a new option for specifying indexed fields to `splunk_hec` sink" + pr_number: 1537 + scopes: ["splunk_hec sink"] + type: "enhancement" + breaking_change: true + author: "Alexander Rodin" + files_count: 5 + insertions_count: 87 + deletions_count: 22 + }, { + sha: "60c18c26e75ad5397a3cd1ef1779534069118181" + date: "2020-01-20 12:20:56 +0000" + description: "Remove trailing spaces to fix `check-fmt`" + pr_number: null + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "16decce8228a483ddac5eef08626bff75886a9df" + date: "2020-01-20 15:45:33 +0000" + description: "Cleanup \"free\" language" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 7 + insertions_count: 31 + deletions_count: 21 + }, { + sha: "73c2d08ac37b1c587059697125451ea23fe23cc4" + date: "2020-01-20 16:31:18 +0000" + description: "Initial `logfmt_parser` transform impl" + pr_number: 1541 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 12 + insertions_count: 510 + deletions_count: 1 + }, { + sha: "c2d52993a292650b9413c59f7cb40ba6535852ee" + date: "2020-01-20 18:56:16 +0000" + description: "add assume_role" + pr_number: 1554 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 7 + insertions_count: 93 + deletions_count: 10 + }, { + sha: "ccee42734d0c1af89e84d007d539a663b6b87f33" + date: "2020-01-20 20:00:25 +0000" + description: "Initial `logplex` source implementation" + pr_number: 1540 + scopes: ["new source"] + type: 
"feat" + breaking_change: false + author: "Luke Steensen" + files_count: 11 + insertions_count: 408 + deletions_count: 1 + }, { + sha: "b290ceb2aee4242834d434a4bd092fe2a0cece3b" + date: "2020-01-21 14:11:30 +0000" + description: "Remove build and verify steps for non-releases" + pr_number: 1558 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 46 + deletions_count: 20 + }, { + sha: "b07f99ea1e3cb39a9f23471684e881a610cc3d36" + date: "2020-01-22 00:29:51 +0000" + description: "Add PR length guidelines to contrib" + pr_number: 1539 + scopes: ["docs"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 7 + deletions_count: 1 + }] + } + "0.11.1": { + date: "2020-12-17" + codename: "" + whats_next: [] + commits: [{ + sha: "d6de536d361c5edbbe056945b8d809e5a1999c50" + date: "2020-12-03 07:38:50 UTC" + description: "Change logs level on request" + pr_number: 5337 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "7e7bf612dd86867db774c560c1cf95dc6c390a1f" + date: "2020-12-04 06:27:26 UTC" + description: "Set content encoding header when compression is on" + pr_number: 5355 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Samuel Gabel" + files_count: 1 + insertions_count: 20 + deletions_count: 0 + }, { + sha: "90ac946d7f40f18cfde563bf0814fa963148010c" + date: "2020-12-05 09:58:28 UTC" + description: "Include config format test only with required features" + pr_number: 5356 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 11 + deletions_count: 6 + }, { + sha: "4a59b403b0304036566af27c4ce9a6cd475b5f11" + date: "2020-12-09 01:29:43 UTC" + description: "Set Accept-Encoding to identity for HTTP client" + pr_number: 5442 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 48 + deletions_count: 6 + }, { + sha: "19efdca02e8644d8a375b6f4a30a3d66e1fba0bc" + date: "2020-12-09 21:42:10 UTC" + description: "Add support for detecting glibc version to installer script" + pr_number: 5421 + scopes: ["setup"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 27 + deletions_count: 3 + }, { + sha: "543439ec44390ca265a26d175739ea16df4893d1" + date: "2020-12-10 08:15:35 UTC" + description: "Reuse buffers" + pr_number: 5344 + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 16 + insertions_count: 347 + deletions_count: 108 + }, { + sha: "e070bb7a307e1d71a2be32adda99839581325c9d" + date: "2020-12-17 08:11:48 UTC" + description: "Fix wrong log level" + pr_number: 5558 + scopes: ["coercer transform"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "b82d330fb492968a9aa6d8539e7228acd6c547ac" + date: "2020-12-17 22:17:46 UTC" + description: "Remove duplicated event" + pr_number: 5451 + scopes: ["vector sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 0 + deletions_count: 17 + }, { + sha: "2215e49422b6d93b307ce29cbc4d89511b1f8a89" + date: "2020-12-18 04:46:43 UTC" + description: "Update hyper to work around the docker EOF errors" + 
pr_number: 5561 + scopes: ["docker_logs source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 4 + deletions_count: 2 + }] + } + "0.8.1": { + date: "2020-03-04" + codename: "" + whats_next: [] + commits: [{ + sha: "5d9aa496338aff6926e8d447e150858468b8a603" + date: "2020-03-03 15:21:30 +0000" + description: "Correct typetag" + pr_number: 1972 + scopes: ["humio_logs sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }] + } + "0.11.0": { + date: "2020-12-01" + codename: "Kubernetes Pipes" + description: """ + The Vector team is pleased to announce 0.11.0. This release + is focused on Kubernetes. In addition to the new, + first-class Kubernetes integration, we launched a number of + complementary features that make Vector a prime candidate + for collecting and processing your Kubernetes logs. Check + out the [highlights](#highlights) and [changelog](#changelog) + for more details. + """ + whats_next: [{ + title: "Vector's own observability" + description: """ + We will be working to improve Vector's own observability with high-quality internal metrics, logs, + CLI tools, and dashboards. This will likely be the focus of 0.13. + """ + }, { + title: "Kubernetes aggregator role" + description: """ + In addition to Vector's observability, 0.12 will include support for + deploying Vector into the [aggregator role](https://vector.dev/docs/setup/deployment/roles/#aggregator) + within Kubernetes. This will allow you to build end-to-end observability + pipelines in Kubernetes with Vector, distributing processing on the edge, + centralizing it with an aggregator, or both. If you are interested in + beta testing, please [join our chat](https://chat.vector.dev) and let us + know. + """ + }, { + title: "Vector Remap Language (VRL)" + description: """ + Finally, 0.12 will introduce the new Vector Remap Language. This is a + Rust-native syntax designed for safe and efficient data mapping. It's an + ultra-performant middle ground between fundamental transforms like + `add_fields` and a full-blown runtime like `lua`. + + If interested, you can beta test via the [`remap` transform](https://vector.dev/docs/reference/transforms/remap/) + and use the [Vector Remap Language reference](https://vector.dev/docs/reference/vrl/) + to write scripts. + """ + }]
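+ // A minimal sketch of how the beta `remap` transform described above might be configured, assuming a TOML + // config whose VRL program lives in a "source" field; the field name, the "scrub" component name, and the + // "app_logs" input are assumptions, while `downcase` and `del` are functions documented in this reference: + // + //   [transforms.scrub] + //     type = "remap" + //     inputs = ["app_logs"] + //     source = ''' + //       .level = downcase(.level) + //       del(.password) + //     '''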
+ """ + }] + commits: [{ + sha: "5308b865614987b50bbeecf2167d37ddfc326f55" + date: "2020-07-22 11:15:07 -0400" + description: "Initial `kubernetes_logs` implementation" + pr_number: 2653 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "MOZGIII" + files_count: 60 + insertions_count: 6078 + deletions_count: 25 + }, { + sha: "ca69b000e00b5c3e668b3998a62bc7ecfbc1f51a" + date: "2020-07-22 18:08:53 -0400" + description: "Allow '=' in tag values" + pr_number: 3090 + scopes: ["prometheus source"] + type: "fix" + breaking_change: false + author: "Adam Casey" + files_count: 1 + insertions_count: 84 + deletions_count: 18 + }, { + sha: "94f6db43db5f25b6c1fb005c8a5bd3a2a211a06b" + date: "2020-07-26 17:03:42 -0400" + description: "Cleanup `list` command" + pr_number: 3099 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 43 + deletions_count: 2 + }, { + sha: "622dbf78f3335ca275251f74c64c7a16a93ec9c2" + date: "2020-07-28 09:18:12 -0400" + description: "Upgrade all VecBuffer sinks to allow setting `max_bytes`" + pr_number: 3190 + scopes: ["buffers", "sinks"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 17 + insertions_count: 529 + deletions_count: 231 + }, { + sha: "6a6c390b7e7af2b977ff088245e94e31cefb7640" + date: "2020-07-28 12:04:38 -0400" + description: "Correctly assign capture group fields" + pr_number: 3164 + scopes: ["regex_parser transform"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 124 + deletions_count: 69 + }, { + sha: "24da4475d798460df64d4c2728f9cde823b4d73b" + date: "2020-07-28 21:02:57 -0400" + description: "Add max_length to UDP" + pr_number: 3236 + scopes: ["socket source"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 5 + insertions_count: 26 + deletions_count: 16 + }, { + sha: "7d0dbc9ba5497bae2a217f6bbd3a7fba744a832d" + date: "2020-07-28 23:50:24 -0400" + description: "Instrument \"stdin\" source" + pr_number: 3151 + scopes: ["observability", "stdin source"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 56 + deletions_count: 3 + }, { + sha: "9745e9a3560570ba17afd5148e37e329affec9a2" + date: "2020-07-28 16:14:56 -0400" + description: "Add received and invalid line events" + pr_number: 3241 + scopes: ["observability", "journald source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 57 + deletions_count: 1 + }, { + sha: "9aed5e08ede334dee921df7bd5aaa6ef2ecfd3c1" + date: "2020-07-30 15:04:01 -0400" + description: "Provide error context on parse error" + pr_number: 3278 + scopes: ["prometheus source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 11 + deletions_count: 1 + }, { + sha: "0d497ec9f350f723172efab2ab2462c46de70c4b" + date: "2020-07-31 21:44:10 -0400" + description: "Correct an error with line aggregation in `continue_through` and `halt_before`" + pr_number: 3262 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 140 + deletions_count: 10 + }, { + sha: "9117c9dfbf44100d60b6c9c5fd824835650918ec" + date: "2020-07-31 22:46:53 -0400" + description: "Instrument \"kafka\" source" + pr_number: 3187 + scopes: ["observability", "kafka source"] + type: "enhancement" + 
breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 95 + deletions_count: 17 + }, { + sha: "814102df3c8a98affbe15a7673a63d6668cc4170" + date: "2020-08-03 14:05:38 -0400" + description: "Make sourcetype explicit on Splunk sink" + pr_number: 3297 + scopes: ["splunk_hec sink"] + type: "fix" + breaking_change: true + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 17 + deletions_count: 12 + }, { + sha: "dc3848d176f500588a7c0446aee731e0a6743f4c" + date: "2020-08-03 14:07:34 -0400" + description: "Allow configuration of type field" + pr_number: 3300 + scopes: ["humio_logs sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 70 + deletions_count: 1 + }, { + sha: "67adfd6da35cd341b59200c8cf5d506ea6ab3835" + date: "2020-08-04 11:07:48 -0400" + description: "Update instrumentation" + pr_number: 3317 + scopes: ["observability", "prometheus source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 60 + deletions_count: 9 + }, { + sha: "70b0b13d297fc34d446497729803ad885960e122" + date: "2020-08-04 11:09:04 -0400" + description: "Update instrumentation" + pr_number: 3315 + scopes: ["observability", "syslog source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 36 + deletions_count: 21 + }, { + sha: "059e160cb599ee007affa7f19223a10a40af222f" + date: "2020-08-04 21:36:27 -0400" + description: "Add internal events for `http` source" + pr_number: 3264 + scopes: ["observability", "http source"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 3 + insertions_count: 72 + deletions_count: 3 + }, { + sha: "3c06a27444cccd530c4a1c5ed09f5c7054045d94" + date: "2020-08-04 17:21:01 -0400" + description: "Make sourcetype templatable" + pr_number: 3351 + scopes: ["splunk_hec sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 39 + deletions_count: 4 + }, { + sha: "851d00114281791f29cc1c68f1b44aa201bb0ca4" + date: "2020-08-05 09:47:23 -0400" + description: "Add events" + pr_number: 3254 + scopes: ["observability", "statsd source"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 121 + deletions_count: 8 + }, { + sha: "e7d3015bf548aa76a8b174576ded9b4932aace53" + date: "2020-08-05 09:19:15 -0400" + description: "Add instrumentation" + pr_number: 3345 + scopes: ["observability", "docker source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 311 + deletions_count: 77 + }, { + sha: "1101f26457df26e4d12ede3c93a6d331ac82dd13" + date: "2020-08-05 09:21:42 -0400" + description: "Add instrumentation" + pr_number: 3312 + scopes: ["observability", "splunk_hec source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 125 + deletions_count: 13 + }, { + sha: "547c8ad6ac6ad4fa21dbf3a70f6b18570704bed6" + date: "2020-08-05 15:24:05 -0400" + description: "Add DataDog's `distribution` metric" + pr_number: 2913 + scopes: ["datadog_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 19 + insertions_count: 204 + deletions_count: 26 + }, { + sha: "bd26d0d7bc04652548dc45ee2f60b311027050ce" + date: "2020-08-05 13:26:28 -0400" + description: "Add configuration for 
source" + pr_number: 3327 + scopes: ["splunk_hec sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 74 + deletions_count: 2 + }, { + sha: "7987c59ba587022bd5c9960618be4bd58636fb52" + date: "2020-08-05 14:41:24 -0400" + description: "Allow configuration of type field (#3300)" + pr_number: 3337 + scopes: ["humio_logs sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 73 + deletions_count: 1 + }, { + sha: "6d126e7656215640ce123dbc5ae6b00cd0a27ef3" + date: "2020-08-05 16:11:33 -0400" + description: "Add source configuration to Humio sink" + pr_number: 3328 + scopes: ["humio_logs sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 44 + deletions_count: 0 + }, { + sha: "6bac9f1c6bc74f2b775f5a18ea25b3796f58a555" + date: "2020-08-09 18:14:42 -0400" + description: "Add instrumentation" + pr_number: 3356 + scopes: ["observability", "logplex source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 64 + deletions_count: 6 + }, { + sha: "0f1556702aa2d7b6901eb1d59164a858c7f79630" + date: "2020-08-11 14:20:44 -0400" + description: "Automatic concurrency management" + pr_number: 3094 + scopes: ["sinks", "networking"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 44 + insertions_count: 2136 + deletions_count: 176 + }, { + sha: "8d1995d0d6243624a540b8dfc824e51ae5d77044" + date: "2020-08-12 15:18:24 -0400" + description: "Remove forwarding to syslog from distributed systemd unit" + pr_number: 3427 + scopes: ["debian platform"] + type: "fix" + breaking_change: true + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "3d434d190c5f6d454141f18922b59b1a5c6072c7" + date: "2020-08-13 13:15:41 -0400" + description: "Add more instrumentation" + pr_number: 3417 + scopes: ["observability", "file source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 10 + insertions_count: 341 + deletions_count: 41 + }, { + sha: "86ccdeabd20edc66ac5a19f04a618cfcedc180e1" + date: "2020-08-13 14:57:54 -0400" + description: "Add instrumentation" + pr_number: 3421 + scopes: ["observability", "ansi_stripper transform"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 101 + deletions_count: 19 + }, { + sha: "b4048d9131fc764c6457463b5a9c1768c4b0ae24" + date: "2020-08-13 14:58:13 -0400" + description: "Add instrumentation" + pr_number: 3419 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 32 + deletions_count: 0 + }, { + sha: "bb601f5fae5ad5a7cd05adafd2d6dc84f0b3e868" + date: "2020-08-13 16:56:50 -0400" + description: "Add instrumentation" + pr_number: 3418 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 70 + deletions_count: 9 + }, { + sha: "3ffc3c30b42a482ce1e1dd4bb68faeac7b4d4b76" + date: "2020-08-13 14:48:29 -0400" + description: "Add additional canned ACLs" + pr_number: 3439 + scopes: ["aws_s3 sink"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "e4e597067bf001d96cf51a9259714debeeb7ca86" + date: 
"2020-08-14 08:56:33 -0400" + description: "Add \"text\" encoding for metrics" + pr_number: 3434 + scopes: ["codecs", "console sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 257 + deletions_count: 10 + }, { + sha: "40d7dd936a4155409282b5f9e2a43b2989e739ae" + date: "2020-08-15 16:59:21 -0400" + description: "Even more instrumentation" + pr_number: 3436 + scopes: ["observability", "file source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 35 + deletions_count: 9 + }, { + sha: "a9f746c6f8b0666b5c3573e9e67c47632b00b149" + date: "2020-08-19 11:21:36 -0400" + description: "Rewrite parser, improve error handlings " + pr_number: 3286 + scopes: ["prometheus source"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 8 + insertions_count: 1418 + deletions_count: 478 + }, { + sha: "5f6ce7cfa5fea6e8a3078e63d2e04425b8c38c64" + date: "2020-08-19 22:25:56 -0400" + description: "Add events for starting, stopping, and reloading" + pr_number: 3476 + scopes: ["observability", "startup", "shutdown"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 86 + deletions_count: 29 + }, { + sha: "db2e647ce4a08e2602606e036507f46614a2ec2c" + date: "2020-08-20 14:39:42 -0400" + description: "Add Heartbeat" + pr_number: 3502 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 36 + deletions_count: 1 + }, { + sha: "61e806d01d4cc6d2a527b52aa9388d4547f1ebc2" + date: "2020-08-21 20:42:14 -0400" + description: "Allow configuring flags via env vars" + pr_number: 2149 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 44 + deletions_count: 6 + }, { + sha: "da07131539763df44b638780b7bde23764b0cd92" + date: "2020-08-24 00:08:50 -0400" + description: "Add support for gzip compression" + pr_number: 3373 + scopes: ["file sink", "compression"] + type: "enhancement" + breaking_change: false + author: "Ayush Goyal" + files_count: 6 + insertions_count: 137 + deletions_count: 6 + }, { + sha: "c6847c68da9ff175f2a62b285b63d119bf07331b" + date: "2020-08-24 18:53:52 -0400" + description: "Handle Absolute kind for values other than Gauge." 
+ pr_number: 3510 + scopes: ["statsd sink"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 88 + deletions_count: 54 + }, { + sha: "b41b63ac723d2b0de3a52defa27cac10386f667c" + date: "2020-08-25 06:29:39 -0400" + description: "ASN/ISP database support" + pr_number: 3265 + scopes: ["geoip transform"] + type: "feat" + breaking_change: false + author: "Marko Karppinen" + files_count: 6 + insertions_count: 243 + deletions_count: 90 + }, { + sha: "a67d18cac8aad548887933ce9793708c61c23cc4" + date: "2020-08-25 14:43:00 -0400" + description: "Sync all data before finishing" + pr_number: 3475 + scopes: ["file sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 24 + deletions_count: 10 + }, { + sha: "cc284b4f35f1b482b9056a914222edf58bc5acc9" + date: "2020-08-25 08:43:30 -0400" + description: "Enhance instrumentation" + pr_number: 3521 + scopes: ["observability", "json_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 76 + deletions_count: 47 + }, { + sha: "c6e660801a8bf091202bd511656434a5229d8767" + date: "2020-08-25 08:51:16 -0400" + description: "Add and unify events" + pr_number: 3486 + scopes: ["observability", "socket source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 94 + deletions_count: 113 + }, { + sha: "d11587489476c85f44fc9996e900535737a43922" + date: "2020-08-25 12:01:48 -0400" + description: "Enhance instrumentation" + pr_number: 3523 + scopes: ["observability", "regex_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 151 + deletions_count: 102 + }, { + sha: "8b4ff32e23a16e44b0c88848c82e34b724730828" + date: "2020-08-25 14:22:27 -0400" + description: "Add internal events" + pr_number: 3553 + scopes: ["observability", "grok_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 127 + deletions_count: 39 + }, { + sha: "692e683646fb843633e158fe7cb25e5085dafff0" + date: "2020-08-27 21:00:31 -0400" + description: "Add the ability to store pod labels flat" + pr_number: 3598 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 97 + deletions_count: 2 + }, { + sha: "1e180bff8f1635b7d24cd9f4838af63255ec5137" + date: "2020-08-28 11:44:00 -0400" + description: "Store pod labels flat by default, remove the switch" + pr_number: 3602 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 59 + }, { + sha: "2e840a04a9dcd41637adb7b4dd50f5ed948a1e2a" + date: "2020-08-28 13:20:18 -0400" + description: "Add `file` label" + pr_number: 3586 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 10 + deletions_count: 0 + }, { + sha: "f2301e2e583b227718b258e08eb580fe4989a586" + date: "2020-08-28 13:36:40 -0400" + description: "Add more `main` events" + pr_number: 3582 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 79 + deletions_count: 6 + }, { + sha: "680ab9bc3042ab281c76e11ae0251aeda1f97b6b" + date: "2020-08-28 10:43:41 
-0400" + description: "Switch off of broadcast channel" + pr_number: 3611 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 16 + deletions_count: 90 + }, { + sha: "b96a04554c971e4a92c872527dc1765140e83d58" + date: "2020-08-28 10:11:07 -0400" + description: "Add internal events" + pr_number: 3593 + scopes: ["observability", "log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 100 + deletions_count: 8 + }, { + sha: "4237b6fff0a333bdeb1623b21482168e827268b8" + date: "2020-08-28 10:29:27 -0400" + description: "Implement some UX improvements for WASM" + pr_number: 3490 + scopes: ["wasm transform"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 8 + insertions_count: 150 + deletions_count: 80 + }, { + sha: "9570f71c8c2a5c6410d928366e722b12fa1e8912" + date: "2020-08-28 15:40:33 -0400" + description: "Adds new Helm template variable for podsLabels." + pr_number: 3610 + scopes: ["kuberentes platform"] + type: "enhancement" + breaking_change: false + author: "nkinkade" + files_count: 2 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "1a4359363c8e65a034e1e13d7b81dfbcb2342b97" + date: "2020-08-31 18:51:32 -0400" + description: "Multiline support" + pr_number: 3607 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 4 + insertions_count: 194 + deletions_count: 83 + }, { + sha: "9adebdef5b5a27567803a6b2d2967551935a689c" + date: "2020-08-31 13:38:28 -0400" + description: "Add internal events" + pr_number: 3577 + scopes: ["observability", "tag_cardinality_limit transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 101 + deletions_count: 17 + }, { + sha: "22dc55246ea82e51c85c1340cb7fe618c8ae40da" + date: "2020-08-31 13:40:10 -0400" + description: "Add internal events" + pr_number: 3554 + scopes: ["observability", "coercer transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 52 + deletions_count: 19 + }, { + sha: "7e2ea2958ba2714b87abcf06ce245b71e48fb838" + date: "2020-09-02 11:08:07 -0400" + description: "Increase rate_limit_num to its maximum" + pr_number: 3655 + scopes: ["http sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "7eb1ea3f58727d06417c88d65da478dd004f121a" + date: "2020-09-03 18:45:42 -0400" + description: "Handle panic on HTTP request to HTTPS source" + pr_number: 3693 + scopes: ["http source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "2f373b470353f435572362f59b7f58c4ae337830" + date: "2020-09-03 15:44:48 -0400" + description: "Handle multiple (intermediate) certificates in PEM files" + pr_number: 3687 + scopes: ["security"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 30 + insertions_count: 1169 + deletions_count: 301 + }, { + sha: "2024ab6a9ecc5167e587e3dae8a864f03ff16a7d" + date: "2020-09-04 09:10:09 -0400" + description: "Add a new options to control the auto concurrency limiter" + pr_number: 3690 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 135 + deletions_count: 43 + }, 
{ + sha: "b374fb6a7e2dfe58ffc18d53912748d287068ad8" + date: "2020-09-04 15:22:49 -0400" + description: "rate_limit_num for GCP pubsub and cloud_storage was wrong" + pr_number: 3656 + scopes: ["gcp platform"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 8 + deletions_count: 1 + }, { + sha: "25602ee60359956bf1e5e152408ee252f24a3e57" + date: "2020-09-04 17:44:22 -0400" + description: "allow using excepted fields" + pr_number: 3716 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 28 + deletions_count: 2 + }, { + sha: "d15ff9e125c672e1c4c4401f9ac040406917d53a" + date: "2020-09-05 17:18:27 -0400" + description: "Fix TcpEventSent" + pr_number: 3720 + scopes: ["observability", "socket sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "b692e3e67af92e4490682429b44235f74eca070d" + date: "2020-09-07 19:31:03 -0400" + description: "Add internal events" + pr_number: 3730 + scopes: ["observability", "swimlanes transform"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 3 + insertions_count: 33 + deletions_count: 0 + }, { + sha: "7e351a225ba8dbbeef9f8db35103a42f4ad5d461" + date: "2020-09-08 20:33:05 -0400" + description: "Apply encoding rules after key prefix" + pr_number: 3770 + scopes: ["gcp_cloud_storage sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 16 + deletions_count: 1 + }, { + sha: "fe68891452955772e9a98fdf2565c6d762295e19" + date: "2020-09-09 08:31:59 -0400" + description: "Add IPv6 supports" + pr_number: 3699 + scopes: ["socket sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 4 + insertions_count: 77 + deletions_count: 47 + }, { + sha: "431bd8686b52eee95c59e4c68ea390395ad7ac48" + date: "2020-09-09 15:50:54 -0400" + description: "Apply encoding rules right before encoding" + pr_number: 3775 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Hoàng Đức Hiếu" + files_count: 1 + insertions_count: 29 + deletions_count: 1 + }, { + sha: "eb2426c271c90caa2e0ddec855eccc781f9dd86b" + date: "2020-09-09 15:58:04 -0400" + description: "Add retry delay" + pr_number: 3732 + scopes: ["docker source"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 2 + insertions_count: 37 + deletions_count: 15 + }, { + sha: "7c5aecb9a7741a8816b4f087c920fbe802613862" + date: "2020-09-09 15:36:20 -0400" + description: "New `remap` transform" + pr_number: 3341 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 21 + insertions_count: 3164 + deletions_count: 3 + }, { + sha: "6869120e280541add19777882f20e2ddb4ca9c3e" + date: "2020-09-09 19:40:28 -0400" + description: "Add support for `summary` metric" + pr_number: 3726 + scopes: ["log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 65 + deletions_count: 0 + }, { + sha: "52cd9a777963154bf619975df72e3c52726a4228" + date: "2020-09-09 13:23:24 -0400" + description: "Enhance checkpoint errors with file name" + pr_number: 3782 + scopes: ["observability", "journald source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 14 + deletions_count: 10 + }, { + sha: 
"94def0fb9ec971539984fdcf42dacdcfd0aa3200" + date: "2020-09-10 16:17:03 -0400" + description: "Add internal events" + pr_number: 3812 + scopes: ["observability", "reduce transform"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 34 + deletions_count: 0 + }, { + sha: "ff555aaed1208742ffb71be5242b52ed258eaa06" + date: "2020-09-11 15:53:27 -0400" + description: "add internal events" + pr_number: 3809 + scopes: ["observability", "dedupe transform"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 43 + deletions_count: 5 + }, { + sha: "f03898e1b6a9d78ce40c48b19675d8e7cb67f8c5" + date: "2020-09-11 15:53:51 -0400" + description: "add internal events" + pr_number: 3807 + scopes: ["observability", "tokenizer transform"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 74 + deletions_count: 12 + }, { + sha: "9c83300f005648d3a5292754b7127692b9e72e53" + date: "2020-09-14 12:29:16 -0400" + description: "apply encoding rules in sinks" + pr_number: 3766 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 10 + insertions_count: 156 + deletions_count: 29 + }, { + sha: "9df7255da7d7f602e62e0adf1205c417db83d287" + date: "2020-09-15 18:50:03 -0400" + description: "Support `summary` statistic" + pr_number: 3846 + scopes: ["influxdb_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 68 + deletions_count: 9 + }, { + sha: "f8696563012d1840e9e6a52206b03634a6bfdb4c" + date: "2020-09-15 18:58:20 -0400" + description: "Support datadog `distribution` metric " + pr_number: 3725 + scopes: ["datadog_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 202 + deletions_count: 51 + }, { + sha: "654bad152b4cd0931957c0710d861f215206e4f0" + date: "2020-09-15 19:12:22 -0400" + description: "Regularize internal event messages" + pr_number: 3850 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 29 + insertions_count: 110 + deletions_count: 73 + }, { + sha: "8f8084e8c3b6810dd095980c7d2a0c0e308a609f" + date: "2020-09-15 20:43:19 -0400" + description: "Recreate trigger" + pr_number: 3855 + scopes: ["reload"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 70 + deletions_count: 19 + }, { + sha: "85f8a6f4e622d26073485f156f904bdfeb49c6b2" + date: "2020-09-15 20:49:29 -0400" + description: "Enable tls by default for `papertrail` and `datadog_logs` sinks" + pr_number: 3824 + scopes: ["networking", "security"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 9 + insertions_count: 38 + deletions_count: 19 + }, { + sha: "823308f9bfdd061c62356bef6c77390f9a17cfce" + date: "2020-09-17 09:35:24 -0400" + description: "Improve retry error messages" + pr_number: 3861 + scopes: ["observability", "elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 4 + insertions_count: 30 + deletions_count: 48 + }, { + sha: "39433cd073fcb349915d64b1b3890ed9def841f9" + date: "2020-09-17 13:45:55 -0400" + description: "Initial `apache_metrics` source implementation" + pr_number: 3704 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Jesse 
Szwedko" + files_count: 10 + insertions_count: 1581 + deletions_count: 0 + }, { + sha: "3e6dac044cbc3f8950cb91dac7a4f455bdc60189" + date: "2020-09-17 21:40:21 -0400" + description: "New GraphQL API" + pr_number: 3708 + scopes: ["observability"] + type: "feat" + breaking_change: false + author: "Lee Benson" + files_count: 21 + insertions_count: 789 + deletions_count: 7 + }, { + sha: "0fc7c55feff53af813c2a2717f8e8ead986fb333" + date: "2020-09-18 09:09:57 -0400" + description: "Accept more timestamp patterns in `to_timestamp`" + pr_number: 3989 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 1 + insertions_count: 19 + deletions_count: 9 + }, { + sha: "bb5993d349a14d50b8e7d47b9f609e1e30a89400" + date: "2020-09-18 17:08:03 -0400" + description: "Include container_name in kubernetes_logs" + pr_number: 4018 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 3 + insertions_count: 47 + deletions_count: 3 + }, { + sha: "778081044b83058b4b555311fa8ae33e17853911" + date: "2020-09-20 12:26:42 -0400" + description: "instrument and guard against double poll" + pr_number: 4024 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 90 + deletions_count: 15 + }, { + sha: "e6a84eb6bd48281ebb53a1225f39099f533251a0" + date: "2020-09-21 00:37:57 -0400" + description: "Add reconnect loop to `TcpSink`" + pr_number: 4030 + scopes: ["socket sink"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 116 + deletions_count: 51 + }, { + sha: "316ec957214dc10aed7a8288ebe521291c91a89a" + date: "2020-09-21 22:57:13 -0400" + description: "Insert timestamp into stackdriver message" + pr_number: 3833 + scopes: ["gcp_stackdriver sink"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 48 + deletions_count: 6 + }, { + sha: "b995485d08988c37e8d35730b3fc0b0a9b017be8" + date: "2020-09-24 05:41:32 -0400" + description: "Start reading logs at checkpoint" + pr_number: 4043 + scopes: ["kubernetes_logs source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 37 + deletions_count: 1 + }, { + sha: "ad77d20302cd77f5f00262a32d75d592a711fa86" + date: "2020-09-25 08:31:03 -0400" + description: "GraphQL client" + pr_number: 3778 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 13 + insertions_count: 2113 + deletions_count: 11 + }, { + sha: "cfa8e78193e4ec5f0c7132c6edda90ae68c2ca2d" + date: "2020-09-25 16:55:15 -0400" + description: "Avoid calling `peer_addr`" + pr_number: 3822 + scopes: ["networking"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 5 + insertions_count: 45 + deletions_count: 39 + }, { + sha: "1ceb8fb7d290eedf80330065be102827628189fa" + date: "2020-09-25 10:02:27 -0400" + description: "Fix tokio signal behavior to handle a loop of signals" + pr_number: 4095 + scopes: ["shutdown"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 9 + deletions_count: 7 + }, { + sha: "fba1de4463b41a7fd42d626f3cb6b4971278c3ed" + date: "2020-09-26 14:53:52 -0400" + description: "add `format_timestamp` function" + pr_number: 4075 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" 
+ files_count: 4 + insertions_count: 151 + deletions_count: 3 + }, { + sha: "c66a7307e92a327c1c046398b9a4fa495d028ee6" + date: "2020-09-27 15:48:22 -0400" + description: "add `contains` function" + pr_number: 4090 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 247 + deletions_count: 39 + }, { + sha: "f7858e83bbf1452afe5401cacbcb0dba44b2f1e1" + date: "2020-09-27 17:40:09 -0400" + description: "add `slice` function" + pr_number: 4092 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 290 + deletions_count: 4 + }, { + sha: "5bfaff9527d93158c8570532fd0ae4eba890658f" + date: "2020-09-28 08:13:02 -0400" + description: "add `tokenize` function" + pr_number: 4093 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 9 + insertions_count: 261 + deletions_count: 130 + }, { + sha: "c5ec225ac48bc0d1de08bf84b73569cb75a307b2" + date: "2020-09-28 10:59:47 -0400" + description: "more robust handling of split reads" + pr_number: 4089 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 182 + deletions_count: 91 + }, { + sha: "83f1887b77cd1986b777da167e575386e6b7327c" + date: "2020-09-28 20:11:17 -0400" + description: "Add container_image and pod_node_name annotations" + pr_number: 4020 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 162 + deletions_count: 5 + }, { + sha: "2bef814916bc54db2da09cd5594d7c6939b017a4" + date: "2020-09-28 16:46:18 -0400" + description: "Emit overwrite logs as debug (#3803)" + pr_number: 4088 + scopes: ["observability", "add_fields transform"] + type: "feat" + breaking_change: false + author: "Spencer Gilbert" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "95df9244f7444cc52ec16c9aefd23c3e5f0310ff" + date: "2020-09-29 09:33:25 -0400" + description: "Emit warning on incomplete UDP send" + pr_number: 4034 + scopes: ["observability", "socket sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 3 + insertions_count: 37 + deletions_count: 1 + }, { + sha: "8c9a72b87608abb782f6812bdf8e34b307efb6fa" + date: "2020-09-29 13:05:59 -0400" + description: "add `strip_ansi_escape_codes` function" + pr_number: 4170 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 100 + deletions_count: 2 + }, { + sha: "2a1ef473e84138ea06d5ba82ba4ae1f6288a0378" + date: "2020-09-29 18:00:19 -0400" + description: "Recompute configuration paths" + pr_number: 4094 + scopes: ["reload"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 79 + deletions_count: 17 + }, { + sha: "9f7cbad9d65f86c083165027b5745587fdc63305" + date: "2020-09-29 18:19:31 -0400" + description: "Wait on Windows after components shutdown" + pr_number: 4102 + scopes: ["reload", "windows platform"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 8 + deletions_count: 3 + }, { + sha: "43329e8e7b5715d89d92b262d7b1bfee25e796a1" + date: "2020-09-30 08:18:55 -0400" + description: "Do not serialize None in in-flight config" + pr_number: 4192 + scopes: ["config"] + type: "fix" + breaking_change: 
false + author: "Do Duy" + files_count: 1 + insertions_count: 15 + deletions_count: 1 + }, { + sha: "3a1e96892283e907a3eb1f031d94e3c29f9bf2a2" + date: "2020-09-30 12:25:32 -0400" + description: "add `sha2` function" + pr_number: 4188 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 6 + insertions_count: 202 + deletions_count: 4 + }, { + sha: "5ad63f7d146727326300d96d6be5f8c0dbb747d1" + date: "2020-09-30 13:38:09 -0400" + description: "add `sha3` function" + pr_number: 4198 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 215 + deletions_count: 10 + }, { + sha: "2b52002740b1f595e1eeac77ca8b0326a70fd9e5" + date: "2020-09-30 21:44:42 -0400" + description: "add field's value in warn message when failing to parse" + pr_number: 4215 + scopes: ["observability", "json_parser transform"] + type: "enhancement" + breaking_change: false + author: "Mathieu Stefani" + files_count: 2 + insertions_count: 8 + deletions_count: 4 + }, { + sha: "d645cae992706340cd8abf84039c8a4c5b808499" + date: "2020-09-30 22:25:14 -0400" + description: "Added distroless-libc and distroless-static docker container bases" + pr_number: 4236 + scopes: ["docker platform"] + type: "enhancement" + breaking_change: false + author: "Rick Richardson" + files_count: 4 + insertions_count: 51 + deletions_count: 0 + }, { + sha: "9fceb7ddf0504b475cc59f971d04d8905b241b23" + date: "2020-10-01 15:07:51 -0400" + description: "Add `parse_duration` function" + pr_number: 4186 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 240 + deletions_count: 4 + }, { + sha: "1e7abc23850f3cae83b4672698f89f34a594038c" + date: "2020-10-01 15:47:22 -0400" + description: "Add support for `summary` metric" + pr_number: 4032 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 150 + deletions_count: 62 + }, { + sha: "6b2e43cb29428ffa8a341e80a1efaf4bac589515" + date: "2020-10-01 14:53:54 -0400" + description: "Sink should emit event on invalid metric" + pr_number: 3636 + scopes: ["statsd sink"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 4 + insertions_count: 42 + deletions_count: 6 + }, { + sha: "6d585c96c0ccbfbed02eb883d3a8cf16d33786e0" + date: "2020-10-01 07:14:21 -0400" + description: "Ensure server starts immediately" + pr_number: 4239 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "Rick Richardson" + files_count: 1 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "24ff76f1899a36122ced0fc38846708a1bb2b807" + date: "2020-10-01 16:33:11 -0400" + description: "add a way to register and run vector as a service" + pr_number: 2896 + scopes: ["windows platform"] + type: "feat" + breaking_change: false + author: "Mathieu Stefani" + files_count: 12 + insertions_count: 938 + deletions_count: 220 + }, { + sha: "57df969c7a2498d540946e73628f7d169c870914" + date: "2020-10-01 22:44:53 -0400" + description: "Fix 101 exit code on `api.enabled = true`" + pr_number: 4254 + scopes: ["startup"] + type: "fix" + breaking_change: false + author: "Lee Benson" + files_count: 1 + insertions_count: 16 + deletions_count: 13 + }, { + sha: "a2cc2440307cd3fe34ba99d80445260286dace56" + date: "2020-10-01 22:51:19 -0400" + description: "Add EKS Web Identity Support" + pr_number: 4049 + scopes: 
["auth", "kubernetes platform", "aws service"] + type: "enhancement" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 1 + insertions_count: 44 + deletions_count: 3 + }, { + sha: "d93dc92b964bc081ccc3303b35871ee256853bdd" + date: "2020-10-02 14:46:12 -0400" + description: "Initial GraphQL topology" + pr_number: 4191 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 8 + insertions_count: 138 + deletions_count: 36 + }, { + sha: "f7a9b676c823ecfdda3ff14f268fd83d80056e17" + date: "2020-10-02 13:23:25 -0400" + description: "Set host key correctly" + pr_number: 4229 + scopes: ["humio_logs sink"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 31 + deletions_count: 4 + }, { + sha: "f77faa5a31c6ce337604c3102fea6f89df816830" + date: "2020-10-04 00:09:22 -0400" + description: "Initial `azure_monitor_logs` sink" + pr_number: 2811 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Nazar Mishturak" + files_count: 6 + insertions_count: 854 + deletions_count: 0 + }, { + sha: "9c3f09c172d3ae7f35c0707a60ffe6a874501112" + date: "2020-10-05 09:30:56 -0400" + description: "Improve error handling" + pr_number: 4288 + scopes: ["console sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 3 + insertions_count: 62 + deletions_count: 27 + }, { + sha: "51fd8679ac36b567c70b385e09b54b5bf59c5520" + date: "2020-10-06 15:53:08 -0400" + description: "add `format_number` function" + pr_number: 4220 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 275 + deletions_count: 0 + }, { + sha: "3e0eb97804874df96af548778db70b5d20418031" + date: "2020-10-07 17:27:29 -0400" + description: "Use configured namespace for `up` metric" + pr_number: 4437 + scopes: ["apache_metrics source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 15 + deletions_count: 14 + }, { + sha: "e83c4cc2d7d1e37c8a0d6769e799345bc85e5ff3" + date: "2020-10-07 16:11:54 -0400" + description: "Add `host_metrics` source" + pr_number: 4157 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 11 + insertions_count: 2101 + deletions_count: 15 + }, { + sha: "a9b176b2f94df3a6996f2081c9a4bbc84c57e271" + date: "2020-10-08 16:05:34 -0400" + description: "Rename sink type and fallback on US region by default" + pr_number: 4447 + scopes: ["sematext_logs sink"] + type: "fix" + breaking_change: false + author: "Nedim Šabić²" + files_count: 1 + insertions_count: 7 + deletions_count: 9 + }, { + sha: "b6a331bbdedd570ab7e8ffdbd4f10e48c0d9afe9" + date: "2020-10-08 17:45:44 -0400" + description: "Add `metric_to_log` transform" + pr_number: 3552 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Daniel Jin" + files_count: 8 + insertions_count: 451 + deletions_count: 2 + }, { + sha: "d3a5fe992e26d422e561b12cf294b748ea013d5d" + date: "2020-10-09 00:39:45 -0400" + description: "Merge `api::Options`" + pr_number: 4342 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 98 + deletions_count: 6 + }, { + sha: "a2723d7f2f162bdfb47473d6a3f0eb046dd1c87f" + date: "2020-10-09 09:32:36 -0400" + description: "Bidirectional source/transform/sink GraphQL types" + pr_number: 4383 + scopes: ["observability"] + 
type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 2 + insertions_count: 627 + deletions_count: 16 + }, { + sha: "06da2aa13c3e24a36a4d735e494ef1730f4d76c9" + date: "2020-10-09 19:48:52 -0400" + description: "Improve error messages" + pr_number: 4429 + scopes: ["observability", "log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 2 + insertions_count: 112 + deletions_count: 70 + }, { + sha: "d04df77033dd30a90b45733c989146e59db9da80" + date: "2020-10-09 22:29:09 -0400" + description: "Add alias to avoid breaking change" + pr_number: 4469 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "178e5bed9509497dc73ecadda509b2e69f403b17" + date: "2020-10-09 17:43:05 -0400" + description: "Use temporary data directory during `validate` command" + pr_number: 4341 + scopes: ["cli"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 134 + deletions_count: 37 + }, { + sha: "1c41985f35cbc702925b919c3d660673f3a9c2ec" + date: "2020-10-09 14:39:39 -0400" + description: "New `aws_firehose` source and `aws_cloudwatch_logs_subscription_parser` transform" + pr_number: 4101 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Jesse Szwedko" + files_count: 20 + insertions_count: 1752 + deletions_count: 339 + }, { + sha: "36dea21a558d706535c980930f3c7b750b93bbfc" + date: "2020-10-11 10:05:17 -0400" + description: "New `sematext_metrics` sink" + pr_number: 3501 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 13 + insertions_count: 614 + deletions_count: 23 + }, { + sha: "2b3b98cea0844e254d65fcb3c674f93bbe6560b2" + date: "2020-10-11 18:52:53 -0400" + description: "Option to specify `quantiles`" + pr_number: 4412 + scopes: ["metrics", "sinks"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 107 + deletions_count: 28 + }, { + sha: "5ee768ac7fc31efbc80e9932b104d11c7272325f" + date: "2020-10-11 19:54:04 -0400" + description: "Allow inclusion of env field in request and default_env in config" + pr_number: 4077 + scopes: ["logdna sink"] + type: "feat" + breaking_change: false + author: "Robin Mattheussen" + files_count: 3 + insertions_count: 36 + deletions_count: 0 + }, { + sha: "48064d005664e772e5664645240d7a713d1364bd" + date: "2020-10-11 14:05:32 -0400" + description: "Update log levels for transforms" + pr_number: 4235 + scopes: ["observability", "transforms"] + type: "feat" + breaking_change: false + author: "Spencer Gilbert" + files_count: 8 + insertions_count: 15 + deletions_count: 15 + }, { + sha: "7dd7e8125e33751b38013c810629e408afdddf2e" + date: "2020-10-12 00:39:38 -0400" + description: "add TLS settings to influxdb_logs and influxdb_metrics sinks" + pr_number: 4037 + scopes: ["security", "networking", "influxdb_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Mathieu Stefani" + files_count: 5 + insertions_count: 161 + deletions_count: 29 + }, { + sha: "19585c3a9dbfe82590d92d96fb72a606e0f4b255" + date: "2020-10-12 23:37:26 -0400" + description: "Propagate close for Fanout and buffers::disk::Writer" + pr_number: 4275 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 3 + insertions_count: 49 + deletions_count: 26 + }, { + sha: 
"d4f9c0d33b03b7cf506d6dcb0cea41dcbabd7687" + date: "2020-10-13 15:26:22 -0400" + description: "Add a tags configuration options to add user-defined tags" + pr_number: 4068 + scopes: ["influxdb_metrics sink"] + type: "enhancement" + breaking_change: false + author: "Mathieu Stefani" + files_count: 2 + insertions_count: 87 + deletions_count: 15 + }, { + sha: "594242f7c2550458eaa34e021be28668cb1be946" + date: "2020-10-13 16:36:46 -0400" + description: "Add vector user to adm in debian packaging" + pr_number: 4544 + scopes: ["debian platform"] + type: "enhancement" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "1e95458540596b7e0058684afc8ac97caa84e992" + date: "2020-10-14 17:21:43 -0400" + description: "Support all socket types in statsd sink" + pr_number: 3557 + scopes: ["statsd sink"] + type: "enhancement" + breaking_change: true + author: "Do Duy" + files_count: 9 + insertions_count: 629 + deletions_count: 365 + }, { + sha: "dae23aa69f2190aa9de46ad375531cf31d3fa470" + date: "2020-10-14 09:39:32 -0400" + description: "Do not automatically append /metrics" + pr_number: 4276 + scopes: ["prometheus_source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 23 + deletions_count: 9 + }, { + sha: "fe05027693a157269f089dbc3ebd5b9a251973b1" + date: "2020-10-14 17:22:29 -0400" + description: "Add support for dedicated Azure region" + pr_number: 4507 + scopes: ["azure_monitor_logs sink"] + type: "feat" + breaking_change: false + author: "Nazar Mishturak" + files_count: 2 + insertions_count: 42 + deletions_count: 2 + }, { + sha: "2f2583b59062e686c9458d10c2679060c4600e4c" + date: "2020-10-14 13:51:30 -0400" + description: "Don't dedot keys" + pr_number: 4103 + scopes: ["http source"] + type: "fix" + breaking_change: true + author: "Daniel Jin" + files_count: 1 + insertions_count: 30 + deletions_count: 2 + }, { + sha: "93dd72459df1bfdd6cd35499588d21859f15135c" + date: "2020-10-15 18:47:13 -0400" + description: "Add compression level" + pr_number: 3032 + scopes: ["sinks", "networking", "compression"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 14 + insertions_count: 221 + deletions_count: 74 + }, { + sha: "b7ec1a94f4e6d59288519c0a085afed8cbc00ad9" + date: "2020-10-16 01:22:03 -0400" + description: "Allow using custom selectors" + pr_number: 4182 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 304 + deletions_count: 24 + }, { + sha: "73709ea6962d00e5424fb38fa4b05d0411e43a19" + date: "2020-10-16 08:27:32 -0400" + description: "`vector top`, v1" + pr_number: 4431 + scopes: ["observability"] + type: "feat" + breaking_change: false + author: "Lee Benson" + files_count: 15 + insertions_count: 393 + deletions_count: 13 + }, { + sha: "f1b99881b5d1db81c52a487d390c888a6550859c" + date: "2020-10-16 14:32:19 -0400" + description: "Support assume_role with EKS web identity" + pr_number: 4406 + scopes: ["aws service", "kubernetes platform", "auth"] + type: "enhancement" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 1 + insertions_count: 11 + deletions_count: 1 + }, { + sha: "20bc89ab8cf1979c35b15a98ebcf4e2ec51a758e" + date: "2020-10-16 16:30:56 -0400" + description: "Rename \"identifier_fields\" to \"group_by\"" + pr_number: 4580 + scopes: ["reduce transform"] + type: "enhancement" + breaking_change: true + author: "Jean 
Mertz" + files_count: 4 + insertions_count: 15 + deletions_count: 15 + }, { + sha: "b99af35ef731f2070917715b1b8143cbe1523bc4" + date: "2020-10-16 16:31:22 -0400" + description: "\"concat_newline\" strategy merges using newline" + pr_number: 4579 + scopes: ["reduce transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 45 + deletions_count: 10 + }, { + sha: "ea224e12940f3ad4cdc87f1077a813dadce777a0" + date: "2020-10-16 17:19:41 -0400" + description: "Sanitize URL when tagging metrics" + pr_number: 4410 + scopes: ["apache_metrics source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 72 + deletions_count: 19 + }, { + sha: "986e62cb8d2565d7f230d9375439551325bb7596" + date: "2020-10-16 23:35:15 -0400" + description: "New `humio_metrics` sink." + pr_number: 4531 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 9 + insertions_count: 304 + deletions_count: 133 + }, { + sha: "12db2a2435c524c24e747b35a35b155c4360d877" + date: "2020-10-16 21:05:12 -0400" + description: "Treat all hyper::Error results as retriable" + pr_number: 4575 + scopes: ["sinks", "networking"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "16bed3642aff5851c41da67f37ba1e6d80276563" + date: "2020-10-17 06:06:11 -0400" + description: "Advanced container filtering" + pr_number: 4586 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 4 + insertions_count: 649 + deletions_count: 37 + }, { + sha: "fe965704308dc1f405bd38c0f098ef4118a8c0bc" + date: "2020-10-17 05:06:33 -0400" + description: "Add `parse_url` function" + pr_number: 4428 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 174 + deletions_count: 0 + }, { + sha: "1ec7a8ba70435e290326cad207009bbe61d71636" + date: "2020-10-18 01:43:36 -0400" + description: "Support datadog logs new HTTPS transport" + pr_number: 4164 + scopes: ["datadog_logs sink", "networking"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 5 + insertions_count: 328 + deletions_count: 49 + }, { + sha: "4c67be50157cd651dc728f5bde0f22552cd0bd49" + date: "2020-10-18 11:07:59 -0400" + description: "Basic auth support" + pr_number: 4385 + scopes: ["logplex source", "auth"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 141 + deletions_count: 43 + }, { + sha: "ba55fd0305af6d060c06c216a29310d1776c4e20" + date: "2020-10-18 10:42:00 -0400" + description: "Added region configuration parameter" + pr_number: 4174 + scopes: ["datadog service"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 4 + insertions_count: 47 + deletions_count: 13 + }, { + sha: "0e40d1fee9eb16d0c86957764debd172620407e9" + date: "2020-10-18 08:14:33 -0400" + description: "Handle non-ASCII data in all values" + pr_number: 4608 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 23 + deletions_count: 11 + }, { + sha: "e982c684d9a6e6bf97134a31a3162d3a04a2d4b2" + date: "2020-10-18 10:14:59 -0400" + description: "Send auth header for healthchecks" + pr_number: 4604 + scopes: ["loki sink"] + type: "fix" + 
breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 48 + deletions_count: 2 + }, { + sha: "fe5beb3ca61b441271d15858eb244a22e5d9f82d" + date: "2020-10-20 17:22:15 UTC" + description: "Initial `mongodb_metrics` implementation" + pr_number: 4500 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Kirill Fomichev" + files_count: 14 + insertions_count: 2857 + deletions_count: 22 + }, { + sha: "84e5986a4eff07536c7b2f688600d3a5aa5593ea" + date: "2020-10-20 23:33:52 UTC" + description: "Correctly handle service restart" + pr_number: 4408 + scopes: ["windows platform"] + type: "enhancement" + breaking_change: false + author: "Mathieu Stefani" + files_count: 3 + insertions_count: 206 + deletions_count: 34 + }, { + sha: "31ba36840fa55a8d1709a6a639d60b393b86d7b7" + date: "2020-10-21 04:57:11 UTC" + description: "Add support for all socket types" + pr_number: 4557 + scopes: ["statsd source"] + type: "enhancement" + breaking_change: true + author: "Do Duy" + files_count: 6 + insertions_count: 290 + deletions_count: 86 + }, { + sha: "0e74a647c4c12c7a48ce6bd34d66c18c2356445a" + date: "2020-10-20 18:22:08 UTC" + description: "Updated internal metrics names to match standards" + pr_number: 4647 + scopes: ["observability", "internal_metrics source"] + type: "enhancement" + breaking_change: true + author: "James Turnbull" + files_count: 74 + insertions_count: 284 + deletions_count: 304 + }, { + sha: "716175c0575b05a725a9d8cd7d3f33d45f878df4" + date: "2020-10-22 18:44:54 UTC" + description: "Fix flag for `usermod` in postinst deb script" + pr_number: 4694 + scopes: ["debian platform"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "c447ecf1a6f5e39dd991177fd63ea00305559537" + date: "2020-10-22 20:04:11 UTC" + description: "Add internal events" + pr_number: 4481 + scopes: ["observability", "logfmt_parser transform"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 69 + deletions_count: 11 + }, { + sha: "c4c8cb69971e3a57b36018507f719ddc9e96bc94" + date: "2020-10-23 05:10:23 UTC" + description: "Only sleep when backing off" + pr_number: 4672 + scopes: ["file source"] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 91 + deletions_count: 5 + }, { + sha: "b65f9f873903e22a42b35a6e3e580a1b6e0006c8" + date: "2020-10-24 04:01:52 UTC" + description: "Add `namespace` to `Metric` " + pr_number: 4701 + scopes: ["data model", "metrics"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 29 + insertions_count: 372 + deletions_count: 2 + }, { + sha: "4c015a04cd1cca9128e446600c97f78dd582fa6e" + date: "2020-10-23 22:41:57 UTC" + description: "Send batches of lines" + pr_number: 4719 + scopes: ["file source"] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 5 + insertions_count: 57 + deletions_count: 45 + }, { + sha: "e17298d48c9940208e2412b143531f5cf4bda534" + date: "2020-10-24 07:57:39 UTC" + description: "Fix API max payload size" + pr_number: 4707 + scopes: ["new_relic_logs sink"] + type: "fix" + breaking_change: false + author: "Matilda Smeds" + files_count: 1 + insertions_count: 37 + deletions_count: 17 + }, { + sha: "176fdcfe689a85a1b756ef2866f6ef63ecac0705" + date: "2020-10-26 18:31:03 UTC" + description: "Force daemonset to redeploy when configmap is updated" + pr_number: 4734 + scopes: 
["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "Spencer Gilbert" + files_count: 2 + insertions_count: 7 + deletions_count: 4 + }, { + sha: "84dd243c0595196d8d6c61bb98c49067ad9eb3fb" + date: "2020-07-30 01:13:41 UTC" + description: "Fix default-msvc feature" + pr_number: 3246 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "d64c8b15c4627ddf7424e9ef303fbb3ebb6ab1aa" + date: "2020-07-29 18:40:47 UTC" + description: "Fix typo in metrics name" + pr_number: 3252 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "6dafbeb7c96a39779a65db923159626ffa107b96" + date: "2020-07-30 03:59:18 UTC" + description: "Kubernetes E2E test framework" + pr_number: 2702 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 32 + insertions_count: 1855 + deletions_count: 152 + }, { + sha: "cde9547e69f7808925408443c79f580bd7092d86" + date: "2020-08-05 00:44:07 UTC" + description: "tcp merge_and_fork" + pr_number: 3340 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "a391434e97a078f6ea92691bba76f9af91ad1866" + date: "2020-08-07 00:47:32 UTC" + description: "Add build.rs to CI trigger paths" + pr_number: 3367 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "4cfc66ec32590810ca37d2fde9efdfdd383a0602" + date: "2020-08-08 18:03:35 UTC" + description: "E2E CI flows optimization" + pr_number: 3353 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 4 + insertions_count: 188 + deletions_count: 48 + }, { + sha: "c0b6662f8e7f406626102e416d89d8534485181c" + date: "2020-08-18 04:58:57 UTC" + description: "Ship Helm Chart from release CI workflow" + pr_number: 3459 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 39 + deletions_count: 13 + }, { + sha: "2288fd237648529db57373eba09384f56c2f8093" + date: "2020-08-20 07:07:05 UTC" + description: "Wrap StreamSink around TcpSink" + pr_number: 3495 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Do Duy" + files_count: 2 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "931f6401702e690a4a513e18ae411c25e1a92b0a" + date: "2020-08-26 18:52:05 UTC" + description: "Only update auto-concurrency RTT from OK responses" + pr_number: 3493 + scopes: ["performance"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 155 + deletions_count: 111 + }, { + sha: "7094f511b1bf1ea9e639320fe20199c7d50d3b28" + date: "2020-09-02 00:04:23 UTC" + description: "Fix Splunk integration test" + pr_number: 3661 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Lee Benson" + files_count: 3 + insertions_count: 27 + deletions_count: 3 + }, { + sha: "85eff5814997d07cd4a2d99cc17f34c694c32111" + date: "2020-09-09 02:33:31 UTC" + description: "add deny.toml" + pr_number: 3765 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 12 + deletions_count: 0 + }, { + sha: "50a9c3af6779b887f9e72a313f7caee4c0d3c41c" + 
date: "2020-09-10 07:20:34 UTC" + description: "update next_addr_v6" + pr_number: 3785 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 3 + deletions_count: 7 + }, { + sha: "439a9cfdb48d976f19a0d2e5b84fc268752d93ba" + date: "2020-09-10 22:50:41 UTC" + description: "Fix journald tests" + pr_number: 3794 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "74b43b7ff208ae9531729acdd5331eb89a49851b" + date: "2020-09-10 22:05:52 UTC" + description: "Rework auto concurrency backpressure logic" + pr_number: 3804 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 12 + deletions_count: 6 + }, { + sha: "e55ac69b6a51e4c81e04b643c838459ecee65fdf" + date: "2020-09-16 03:04:16 UTC" + description: "expand macros before validation" + pr_number: 3894 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 42 + deletions_count: 2 + }, { + sha: "04a569a4a418f36c9b7d017151c2edc34e10758b" + date: "2020-09-17 17:51:26 UTC" + description: "Add new set of functions" + pr_number: 3767 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 8 + insertions_count: 1691 + deletions_count: 49 + }, { + sha: "b40b7b661599982eaab5eb8ce3dfbbb0e14ca5b3" + date: "2020-09-24 17:44:36 UTC" + description: "named function arguments" + pr_number: 3927 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 8 + insertions_count: 1163 + deletions_count: 413 + }, { + sha: "ebacf5728c8e56d7cd2b2cb1bac8dbe1901e79a6" + date: "2020-09-24 23:23:53 UTC" + description: "Update tokio signal to avoid unnecessary shutdowns." 
+ pr_number: 4025 + scopes: ["windows platform"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 5 + insertions_count: 36 + deletions_count: 124 + }, { + sha: "cc324ca46ef1fe500802601efd04dc9844ab96c6" + date: "2020-09-29 03:23:40 UTC" + description: "fix clippy warnings for integration tests" + pr_number: 4171 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 8 + insertions_count: 44 + deletions_count: 38 + }, { + sha: "d8b368b86f6736f21ec974c4aae0020df0a0ba0a" + date: "2020-10-07 20:42:24 UTC" + description: "Add musl x86/aarch64 platforms" + pr_number: 3701 + scopes: ["platforms"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 19 + insertions_count: 220 + deletions_count: 58 + }, { + sha: "1dc820049d6ed9d2884c1d583e76d65cdc6eab21" + date: "2020-10-09 21:38:40 UTC" + description: "Adjust auto concurrency tuning defaults" + pr_number: 4476 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 16 + deletions_count: 14 + }, { + sha: "2fa1480b3edac8457b5266d58a476daa895164ac" + date: "2020-10-19 03:42:58 UTC" + description: "Use Signer::sign_to_vec to support older OpenSSL versions" + pr_number: 4623 + scopes: ["azure_monitor_logs sink"] + type: "fix" + breaking_change: false + author: "Nazar Mishturak" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "42a26f5eec2287012bc21b015d0bab43f818913b" + date: "2020-10-20 00:14:40 UTC" + description: "Topology added/removed GraphQL subscriptions" + pr_number: 4581 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 8 + insertions_count: 428 + deletions_count: 8 + }, { + sha: "fb0191c52f78e903a107b702f7dacd52815b429f" + date: "2020-10-21 02:00:47 UTC" + description: "add `floor`, `round` and `ceil` remap functions" + pr_number: 4646 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 6 + insertions_count: 472 + deletions_count: 0 + }, { + sha: "f51e4d6a63f157d31036d2234b5abc59b2509edf" + date: "2020-10-21 03:03:39 UTC" + description: "add `parse_syslog` function" + pr_number: 4632 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 4 + insertions_count: 289 + deletions_count: 2 + }, { + sha: "1c9c2f8fdf234a608f09ad5e7ca91fc65cdf6f08" + date: "2020-10-21 23:57:54 UTC" + description: "add `split` function " + pr_number: 4290 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 43 + insertions_count: 928 + deletions_count: 361 + }, { + sha: "4ec28751c143ffa68a20cf38e6e0492adeb56121" + date: "2020-10-22 06:32:25 UTC" + description: "Improve networking performance" + pr_number: 4668 + scopes: ["statsd sink"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 5 + insertions_count: 181 + deletions_count: 212 + }, { + sha: "30f23c7c6e154543492e40b2f978d7586451fc76" + date: "2020-10-22 18:29:46 UTC" + description: "API host metrics" + pr_number: 4652 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 16 + insertions_count: 1429 + deletions_count: 195 + }, { + sha: "7762f6491530b6f10083dc71f2d9c3f99cdef299" + date: "2020-10-23 22:26:33 UTC" + description: "syslog test should assume local time zone" + pr_number: 4711 + scopes: ["remap 
transform"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 10 + deletions_count: 10 + }, { + sha: "f683f9350d45a3f80de09a65f5a14185f31e99af" + date: "2020-10-24 03:36:13 UTC" + description: "add `log` function" + pr_number: 4640 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 4 + insertions_count: 118 + deletions_count: 2 + }, { + sha: "d1240a6dadd81d59eab5e179af68d8eb8fc1a97d" + date: "2020-10-24 20:09:33 UTC" + description: "use application/json for json encoding to Datadog" + pr_number: 4690 + scopes: ["datadog_logs sink"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 15 + deletions_count: 4 + }, { + sha: "79d146eb849539335c3f06b8df073d71767052f6" + date: "2020-10-26 22:00:29 UTC" + description: "add \"remap-lang\" crate" + pr_number: 4695 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 25 + insertions_count: 1838 + deletions_count: 1 + }, { + sha: "10f2c4b06471bbc3149552f768263e84b484f30a" + date: "2020-10-26 17:40:55 UTC" + description: "Add missing dependency to nightly docker-release" + pr_number: 4747 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "d75a672342c3e7439591aa9c5d4cf669ced57bac" + date: "2020-10-27 23:40:19 UTC" + description: "add `replace` and `flatten` remap functions" + pr_number: 4703 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 5 + insertions_count: 592 + deletions_count: 8 + }, { + sha: "e9440c8bd4a54cd893bb5e1fc097facd237cd8c1" + date: "2020-10-27 18:24:04 UTC" + description: "Switch from topology to components terminology" + pr_number: 4761 + scopes: ["observability"] + type: "chore" + breaking_change: false + author: "Luc Perkins" + files_count: 10 + insertions_count: 1169 + deletions_count: 1169 + }, { + sha: "38d9e1bf6b7431c30358afc3c428d1effe1ff0ca" + date: "2020-10-28 02:05:28 UTC" + description: "annotate logs with query parameters" + pr_number: 4733 + scopes: ["logplex source"] + type: "enhancement" + breaking_change: false + author: "Christian Gregg" + files_count: 6 + insertions_count: 194 + deletions_count: 70 + }, { + sha: "333f71832dd048a88a1395a6528e800e68dd4d31" + date: "2020-10-28 00:27:08 UTC" + description: "Count total bytes for logs" + pr_number: 4705 + scopes: ["blackhole sink"] + type: "chore" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 5 + deletions_count: 8 + }, { + sha: "fe87d036f9b2b1f2235f4d193bf90c364d27fdb6" + date: "2020-10-28 01:00:57 UTC" + description: "Make query_parameters optional" + pr_number: 4782 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "068d158327d79d3c9730ffebd8fdb9c611c8124d" + date: "2020-10-28 04:27:49 UTC" + description: "Updated remaining log messages to match standards" + pr_number: 4737 + scopes: ["logs"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 155 + insertions_count: 572 + deletions_count: 513 + }, { + sha: "07b9cbbe24ed1987edc134b989f29b7225acce23" + date: "2020-10-28 18:22:25 UTC" + description: "`vector top` v1 dashboard" + pr_number: 4702 + scopes: ["observability"] + type: "feat" + breaking_change: 
false + author: "Lee Benson" + files_count: 9 + insertions_count: 634 + deletions_count: 186 + }, { + sha: "256d55c720ab8e8fade0d5d661eebdbdf0e58856" + date: "2020-10-28 20:56:23 UTC" + description: "swap out remap impl with new remap-lang crate" + pr_number: 4709 + scopes: ["remap transform"] + type: "chore" + breaking_change: false + author: "Jean Mertz" + files_count: 97 + insertions_count: 5203 + deletions_count: 4958 + }, { + sha: "faec21c15db467b3b03ffa0fc664ed235549414a" + date: "2020-10-28 21:22:21 UTC" + description: "add \"remap\" condition type" + pr_number: 4743 + scopes: ["processing"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 49 + insertions_count: 377 + deletions_count: 91 + }, { + sha: "bda26823dfc5baa4fb5574c221693e33fcefe51f" + date: "2020-10-28 21:24:26 UTC" + description: "add \"match\" remap function" + pr_number: 4770 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 9 + insertions_count: 137 + deletions_count: 6 + }, { + sha: "0ea323574d8b5fbf1dfdb835dd58242fe55bf48c" + date: "2020-10-28 21:35:26 UTC" + description: "add \"starts_when\" option" + pr_number: 4771 + scopes: ["reduce transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 2 + insertions_count: 329 + deletions_count: 23 + }, { + sha: "3f11f7d29a738d0873b68d7c6e321bf9d9d8e29e" + date: "2020-10-29 19:44:19 UTC" + description: "Uses IP address as source_ip instead of IP:Port in Syslog source UDP mode" + pr_number: 4793 + scopes: ["syslog"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9952546f5c64544055e320f2a57b5dc690872f86" + date: "2020-10-30 01:38:49 UTC" + description: "support numbers in paths" + pr_number: 4800 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 18 + deletions_count: 2 + }, { + sha: "32162bb9d4b72246d16bed87eec4d696bebd075d" + date: "2020-10-30 10:55:24 UTC" + description: "Expose the performance related parameters" + pr_number: 4751 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 43 + deletions_count: 7 + }, { + sha: "a5b9f6765f93a699c6bb37ab290bbb243a4525aa" + date: "2020-11-02 03:57:20 UTC" + description: "Resolve port conflict in sinks" + pr_number: 4735 + scopes: ["reload"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 264 + deletions_count: 31 + }, { + sha: "d55bc0bde4d607e032dd7de4786438e7230fb933" + date: "2020-11-02 01:30:09 UTC" + description: "Run tests in on pull requests" + pr_number: 4736 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1d290794f863668037fc6669adae46d4c650bc68" + date: "2020-11-03 02:58:44 UTC" + description: "support else-if conditional expression" + pr_number: 4814 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 64 + deletions_count: 3 + }, { + sha: "8bf81ed4cf2cbc33e8c18ca5cd60689cba795ce3" + date: "2020-11-03 02:59:56 UTC" + description: "add variable support" + pr_number: 4802 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 45 + deletions_count: 13 + }, { + sha: 
"6ffdf722d1bc234e03da2d26db9e0b12ab5e9ca2" + date: "2020-11-03 07:34:32 UTC" + description: "use custom retry logic" + pr_number: 4813 + scopes: ["clickhouse sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 61 + deletions_count: 5 + }, { + sha: "eac3dba119a61229d74c6868d1cc744e35f1411f" + date: "2020-11-03 18:51:36 UTC" + description: "Add the ability to set conatiner ports at vector-agent Helm chart" + pr_number: 4835 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 9 + deletions_count: 0 + }, { + sha: "b04f4310116615dee6df12b7a42fdf626b2cec2f" + date: "2020-11-03 20:54:17 UTC" + description: "Add internal events" + pr_number: 4480 + scopes: ["aws_ec2_metadata transform"] + type: "enhancement" + breaking_change: false + author: "Jesse Szwedko" + files_count: 3 + insertions_count: 200 + deletions_count: 137 + }, { + sha: "497c76d54189953c62a959d9101453e613c932d2" + date: "2020-11-04 02:35:31 UTC" + description: "Adds optional file output to generator" + pr_number: 4819 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ian Henry" + files_count: 1 + insertions_count: 70 + deletions_count: 12 + }, { + sha: "c2e71041f2e4aa80246b61d00d75426fa01a8251" + date: "2020-11-04 23:48:25 UTC" + description: "Add `namespace` option" + pr_number: 4831 + scopes: ["log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 69 + deletions_count: 18 + }, { + sha: "f6b443bcccb05ded4f3c2fdf422eda76b145591c" + date: "2020-11-05 07:20:37 UTC" + description: "add `replace` function" + pr_number: 4861 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 299 + deletions_count: 161 + }, { + sha: "e3327f47863e178c21fa9a3da45e446b850d5868" + date: "2020-11-05 07:23:14 UTC" + description: "unescape forward slash in regex" + pr_number: 4864 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Jean Mertz" + files_count: 2 + insertions_count: 40 + deletions_count: 3 + }, { + sha: "40c7e1248d54a8f1a54c2b976de9d4ca0448d575" + date: "2020-11-05 07:49:22 UTC" + description: "display full error chain" + pr_number: 4859 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 64 + deletions_count: 20 + }, { + sha: "d52d3871880ca8e37b5b91428ffdfa703e705387" + date: "2020-11-05 04:23:19 UTC" + description: "Add back armv7 support" + pr_number: 4871 + scopes: ["platforms"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 13 + insertions_count: 270 + deletions_count: 42 + }, { + sha: "d1caa067ed0ea49853eebb847bf1b64209dbac01" + date: "2020-11-05 21:54:37 UTC" + description: "Parse RFC3614 messages lacking an app name" + pr_number: 4876 + scopes: ["syslog source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "515f9626d313773cd36a555839c9a54e610393e8" + date: "2020-11-05 20:37:35 UTC" + description: "Add Cue sources for the Vector CLI" + pr_number: 4880 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Luc Perkins" + files_count: 1 + insertions_count: 243 + deletions_count: 0 + }, { + sha: "38816fd245371717033e8b881dbd5e478267bb70" + date: "2020-11-06 02:59:39 UTC" + 
description: "Make print_amount an optional option with a default of 1000" + pr_number: 4883 + scopes: ["blackhole sink"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 2 + insertions_count: 13 + deletions_count: 3 + }, { + sha: "aade9e6e5c236f454c010e5e7366722ffa9b93b8" + date: "2020-11-06 19:50:53 UTC" + description: "Support template syntax in hostname and tags field" + pr_number: 4884 + scopes: ["logdna sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 4 + insertions_count: 247 + deletions_count: 51 + }, { + sha: "94efee7c92b5fb208dbf39214ddd997b07226764" + date: "2020-11-07 03:59:31 UTC" + description: "Fix passing --push arg at build-docker.sh" + pr_number: 4900 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "f58ed206591544815ddf1416adcb0fa657a1d413" + date: "2020-11-06 20:09:43 UTC" + description: "Add TLS and authentication options" + pr_number: 4881 + scopes: ["prometheus source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 85 + deletions_count: 4 + }, { + sha: "84010c6ff3595c932a622b76d2dfb006f7609c36" + date: "2020-11-06 23:00:04 UTC" + description: "Add initial setup for metrics docs in Cue" + pr_number: 4892 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Luc Perkins" + files_count: 4 + insertions_count: 94 + deletions_count: 2 + }, { + sha: "a58b8191f884051aa578761d90ff16c6385de505" + date: "2020-11-10 01:56:26 UTC" + description: "dont use flatten for logs config fields" + pr_number: 4924 + scopes: ["humio_metrics sink"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 3 + insertions_count: 110 + deletions_count: 36 + }, { + sha: "ab520b1589e5100ae9aba1185e423bd8c03c5355" + date: "2020-11-09 20:58:46 UTC" + description: "Prometheus remote_write" + pr_number: 4856 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 23 + insertions_count: 1577 + deletions_count: 746 + }, { + sha: "74067f309ddaec7dec8ae0c494cb000eadfd5439" + date: "2020-11-10 00:24:28 UTC" + description: "Update WASM transform to implement TaskTransform" + pr_number: 4934 + scopes: ["wasm transform"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 126 + deletions_count: 122 + }, { + sha: "95ce5c05dac8bea5cfe0f8998fed80086557b079" + date: "2020-11-10 06:02:48 UTC" + description: "`vector top`, release candidate" + pr_number: 4886 + scopes: ["observability"] + type: "feat" + breaking_change: false + author: "Lee Benson" + files_count: 29 + insertions_count: 1590 + deletions_count: 525 + }, { + sha: "a40a9529feec9bfccdd3391819adc3b20fb180d5" + date: "2020-11-11 01:52:14 UTC" + description: "Add configurable endpoint" + pr_number: 4873 + scopes: ["gcp_pubsub sink"] + type: "enhancement" + breaking_change: false + author: "rpaaron" + files_count: 2 + insertions_count: 27 + deletions_count: 9 + }, { + sha: "a228a28e4fb5cad26ad5165a6832e570d41a6a72" + date: "2020-11-10 22:22:11 UTC" + description: "Kind/type for `vector top`" + pr_number: 4928 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 7 + insertions_count: 140 + deletions_count: 42 + }, { + sha: "c45fa68f03082f1b4c3d33951b27d516d7a1e10c" + date: "2020-11-11 07:28:11 UTC" + description: "Remove 
hardcoded configs in Helm chart" + pr_number: 4938 + scopes: ["kubernetes platform"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 5 + insertions_count: 22 + deletions_count: 14 + }, { + sha: "30dc1b8176fec39626189a1552287875a7c099d7" + date: "2020-11-11 03:43:43 UTC" + description: "`aws_s3` source" + pr_number: 4779 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Jesse Szwedko" + files_count: 15 + insertions_count: 1628 + deletions_count: 159 + }, { + sha: "ee6c24517b8dca2e664f20b42734b7fc368843fc" + date: "2020-11-11 08:16:04 UTC" + description: "Fix markdown errors" + pr_number: 4954 + scopes: ["rfcs"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "4b82090ce4900ecd837a2196c7527210a199fa46" + date: "2020-11-11 20:21:00 UTC" + description: "Add span to future" + pr_number: 4944 + scopes: ["http source"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 78 + deletions_count: 72 + }, { + sha: "45b744c2bb89ea7824be318ab680d784883aa5b1" + date: "2020-11-11 20:21:36 UTC" + description: "Restart journalctl on errors, save checkpoint on shutdown" + pr_number: 4836 + scopes: ["journald source"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 270 + deletions_count: 242 + }, { + sha: "0b2c8f48756c8ca07800821f3065618c0c9b173c" + date: "2020-11-11 09:10:14 UTC" + description: "(key/value) kv parser" + pr_number: 3284 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Nick Hagianis" + files_count: 10 + insertions_count: 727 + deletions_count: 2 + }, { + sha: "ae42a8dd68c4afdbb30592bdb69781e8f51cf283" + date: "2020-11-12 04:39:16 UTC" + description: "make scrape interval configurable" + pr_number: 4998 + scopes: ["sources"] + type: "enhancement" + breaking_change: false + author: "Daniel Jin" + files_count: 1 + insertions_count: 24 + deletions_count: 5 + }, { + sha: "63ee6d486ab5db57548060eaacbed971fd9b7136" + date: "2020-11-12 16:54:56 UTC" + description: "Humanized formatting for `vector top` metrics" + pr_number: 4945 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 6 + insertions_count: 182 + deletions_count: 50 + }, { + sha: "d0b414e7839524210e6c550dadd74323774008ec" + date: "2020-11-12 16:55:46 UTC" + description: "Batch events processed total" + pr_number: 4958 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 1 + insertions_count: 48 + deletions_count: 23 + }, { + sha: "63acc541c1b59bf46892517dcc0d5c6078ca2087" + date: "2020-11-12 16:23:16 UTC" + description: "Added batch subscriptions for component bytes and errors" + pr_number: 5002 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Steve Hall" + files_count: 2 + insertions_count: 138 + deletions_count: 0 + }, { + sha: "7c72f888871a808e1da01c3a67df1607434d1f82" + date: "2020-11-12 19:28:34 UTC" + description: "Document `vector top` CLI" + pr_number: 4907 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Luc Perkins" + files_count: 1 + insertions_count: 46 + deletions_count: 2 + }, { + sha: "2dc1869a90036c076d6f7ffb9a15eb46180eed19" + date: "2020-11-13 17:21:31 UTC" + description: "API batch support + tests" + pr_number: 5004 + scopes: ["observability"] + type: "enhancement" + 
breaking_change: false + author: "Lee Benson" + files_count: 4 + insertions_count: 235 + deletions_count: 4 + }, { + sha: "23d0702f529dfda66ba723a871272e6e2599a32d" + date: "2020-11-14 01:09:02 UTC" + description: "API version + hostname queries" + pr_number: 5018 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 3 + insertions_count: 84 + deletions_count: 0 + }, { + sha: "d06fae42d5f05faf7cc9717f93e9b860704ece1f" + date: "2020-11-13 20:53:19 UTC" + description: "Add PodIPs into Pod Metadata events" + pr_number: 4887 + scopes: ["kubernetes_logs source"] + type: "enhancement" + breaking_change: false + author: "Ian Henry" + files_count: 8 + insertions_count: 224 + deletions_count: 6 + }, { + sha: "eaaab650d81999aeaf88b03891a83082b31a19ea" + date: "2020-11-13 21:31:35 UTC" + description: "More debug info on more HTTP requests" + pr_number: 4999 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 26 + insertions_count: 136 + deletions_count: 84 + }, { + sha: "196aeec85cd9e703b2385b06344dfa74e180d6c4" + date: "2020-11-14 04:40:29 UTC" + description: "add internal option to ignore missing files" + pr_number: 5026 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 56 + deletions_count: 29 + }, { + sha: "b5e2ceaa74f50d87f810ac02fd2295eb3b407150" + date: "2020-11-15 11:59:41 UTC" + description: "Edited a few vector top error messages" + pr_number: 5034 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "b8f83d73bb088269c98525fdef28962b222da983" + date: "2020-11-16 07:42:03 UTC" + description: "New `nats` sink" + pr_number: 3605 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Eric Lee" + files_count: 10 + insertions_count: 764 + deletions_count: 7 + }, { + sha: "e2d01b83f92a50e7d0e9e48a9f235f1007a2b305" + date: "2020-11-16 21:59:54 UTC" + description: "compile-time program result type checking" + pr_number: 4902 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 65 + insertions_count: 3822 + deletions_count: 299 + }, { + sha: "f06c01f218f75ee1c7b610ac55523d5fcf823d8a" + date: "2020-11-17 04:46:57 UTC" + description: "support enum variants for function arguments" + pr_number: 5008 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 11 + insertions_count: 431 + deletions_count: 270 + }, { + sha: "f5f2031f9a51dd39291706b5b0e0982ed9e78191" + date: "2020-11-16 21:51:33 UTC" + description: "Fix fanout remove-while-iterating bug" + pr_number: 5027 + scopes: ["topology"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 172 + deletions_count: 99 + }, { + sha: "5d06064413da748247bc6a92784125388c4089b7" + date: "2020-11-17 08:51:17 UTC" + description: "use path arguments for `del` and `only_field` functions" + pr_number: 5015 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 9 + insertions_count: 151 + deletions_count: 170 + }, { + sha: "22ac9f9154b887e5c0587b41c836c1617a183684" + date: "2020-11-17 05:00:04 UTC" + description: "Renamed docker source to docker_logs" + pr_number: 5039 + scopes: ["docker source"] + type: "enhancement" + breaking_change: 
false + author: "James Turnbull" + files_count: 10 + insertions_count: 132 + deletions_count: 101 + }, { + sha: "442615879c35d30a55d03e30160ecd5965bd4b4c" + date: "2020-11-18 00:51:08 UTC" + description: "expressions no longer return an option" + pr_number: 5053 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 52 + insertions_count: 359 + deletions_count: 609 + }, { + sha: "931d97dc7579aa2ba9198b91c121ac81df507cca" + date: "2020-11-18 07:19:39 UTC" + description: "Fix JSON compression" + pr_number: 5050 + scopes: ["http sink"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 67 + deletions_count: 4 + }, { + sha: "06cf78868835c2c33e0d859402c5b2984d7e075b" + date: "2020-11-18 02:24:35 UTC" + description: "Rename `version` -> `versionString` in GraphQL schema" + pr_number: 5074 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 6 + insertions_count: 53 + deletions_count: 4 + }, { + sha: "6bce5ac9d79a7662e97adce386ea93c8888f3bcb" + date: "2020-11-17 22:20:48 UTC" + description: "Flush messages" + pr_number: 5069 + scopes: ["splunk_hec source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 38 + deletions_count: 17 + }, { + sha: "c5fb3a1b12c9f51411784307f8c7ddd2649026c6" + date: "2020-11-18 00:07:38 UTC" + description: "Initial `aws_ecs_metrics` source" + pr_number: 4698 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Daniel Jin" + files_count: 9 + insertions_count: 2017 + deletions_count: 2 + }, { + sha: "287a4d8fddf6e60f922492e75060d5722c58fb8a" + date: "2020-11-18 19:24:58 UTC" + description: "Initial `aws_sqs` sink" + pr_number: 4675 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Kirill Fomichev" + files_count: 9 + insertions_count: 568 + deletions_count: 1 + }, { + sha: "fcb6e39e2fc9686fdd25df212bb411f61b2533b4" + date: "2020-11-18 17:55:26 UTC" + description: "undefined path or variable return null" + pr_number: 5056 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 31 + insertions_count: 46 + deletions_count: 196 + }, { + sha: "a68ea1f4a0ffd6aba5abdc24a5237fd23dcea9c5" + date: "2020-11-18 18:54:52 UTC" + description: "Add I/O (throughput) columns to `vector top`" + pr_number: 5016 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 21 + insertions_count: 698 + deletions_count: 371 + }, { + sha: "c357da46928b23db5016d6b5da17b44c49fb73e3" + date: "2020-11-18 19:54:00 UTC" + description: "Flush sinks in a couple of more places" + pr_number: 5082 + scopes: ["topology"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 34 + deletions_count: 24 + }, { + sha: "fcd26bd84484168000603bbfe0713614608653cd" + date: "2020-11-19 19:30:17 UTC" + description: "Fix flaky `timely_shutdown_docker` test" + pr_number: 5101 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1e441e969dc8f63ae40216d0f2a2acc422076c56" + date: "2020-11-20 05:32:21 UTC" + description: "add RUSTSEC-2020-0071 to deny.toml" + pr_number: 5117 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 8 + deletions_count: 1 + }, { 
+ sha: "80587a2956383582708344e70cc425f58f9c933b" + date: "2020-11-20 00:41:39 UTC" + description: "Fix output timestamp" + pr_number: 5118 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 86 + deletions_count: 18 + }, { + sha: "c909c42292db7804a55d2635825724d33f80314e" + date: "2020-11-20 02:02:25 UTC" + description: "Fix flaky `configuration_path_recomputed` test" + pr_number: 5128 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 8 + deletions_count: 4 + }, { + sha: "c88040b9f6d29d83987fb0e536562b41888c8e96" + date: "2020-11-20 04:47:35 UTC" + description: "clean up file format and fix features" + pr_number: 5133 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 3 + deletions_count: 1 + }, { + sha: "37d5e73d9459eb6760ed0403c0df9b5fdc6c4555" + date: "2020-11-20 07:49:27 UTC" + description: "Fix test failure for empty metrics" + pr_number: 5134 + scopes: ["host_metrics source"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 44 + deletions_count: 39 + }, { + sha: "f1d162d44f963fdb79602ef2167c2a3d6eceb234" + date: "2020-11-21 04:03:15 UTC" + description: "Expire checkpoints" + pr_number: 5146 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 91 + deletions_count: 16 + }, { + sha: "31fb077080696539fab3fa7299a12e3a143e0aae" + date: "2020-11-21 05:20:31 UTC" + description: "Include kafka metadata as optional keys" + pr_number: 5153 + scopes: ["kafka source"] + type: "enhancement" + breaking_change: false + author: "Spencer Gilbert" + files_count: 2 + insertions_count: 66 + deletions_count: 4 + }, { + sha: "68f5907fe78f1c77294dec53588ad2b53d05e536" + date: "2020-11-22 03:05:32 UTC" + description: "Add ingestion timestamp" + pr_number: 4795 + scopes: ["kubernetes_logs source"] + type: "feat" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 18 + deletions_count: 2 + }, { + sha: "58d6e8ffa9f1fe7f5fdf31359435c8bc9f9e26c2" + date: "2020-11-23 03:02:01 UTC" + description: "Add rating by `index`" + pr_number: 4918 + scopes: ["sampler transform"] + type: "enhancement" + breaking_change: true + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 178 + deletions_count: 104 + }, { + sha: "ae7e0ce06af1b6a3165c7d5e5f4994055ac424ab" + date: "2020-11-23 19:38:05 UTC" + description: "Support basic-auth credentials in endpoint configuation" + pr_number: 5095 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 5 + insertions_count: 147 + deletions_count: 36 + }, { + sha: "cf8dee9dbc65c0c2e3232b60d82788018268f932" + date: "2020-11-23 19:01:21 UTC" + description: " Allow querying transform outputs on transform components" + pr_number: 5171 + scopes: ["api"] + type: "enhancement" + breaking_change: false + author: "Steve Hall" + files_count: 2 + insertions_count: 32 + deletions_count: 0 + }, { + sha: "44d2945c9cdc03f8ee084e4228be42acfcd19096" + date: "2020-11-24 06:41:50 UTC" + description: "Expose internal metrics cardinality as a internal metric counter" + pr_number: 4615 + scopes: ["metrics"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 92 + deletions_count: 10 + }, { + sha: 
"4ad53dd9a01a40aecfaddfc2c9e865e39d655fa8" + date: "2020-11-24 02:32:45 UTC" + description: "Upgrade bollard to 0.9 to close#4169" + pr_number: 5189 + scopes: ["docker_logs source"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 4 + insertions_count: 30 + deletions_count: 17 + }, { + sha: "18b9c0fade267e4833fa62067ba061ca84f35801" + date: "2020-11-25 00:10:23 UTC" + description: "Handle config warnings for `validate` command " + pr_number: 5044 + scopes: ["cli"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 11 + insertions_count: 90 + deletions_count: 64 + }, { + sha: "3cbb55bacc3f2df07e192a78fc44a62c8e2bf258" + date: "2020-11-25 18:53:27 UTC" + description: "Add test for component links" + pr_number: 5218 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Steve Hall" + files_count: 4 + insertions_count: 138 + deletions_count: 1 + }, { + sha: "0d4d66fedcc7eb13e0cfca773d30a91d34b25eb4" + date: "2020-11-26 06:46:35 UTC" + description: "Initial `nginx_metrics` source implementation" + pr_number: 5209 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Kirill Fomichev" + files_count: 19 + insertions_count: 736 + deletions_count: 29 + }, { + sha: "89ae91c1da0cbb789c4ae429172632d66f7252d5" + date: "2020-11-26 18:47:24 UTC" + description: "Allow tenant_id to be templatable on loki sink" + pr_number: 5204 + scopes: ["loki sink"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 2 + insertions_count: 110 + deletions_count: 20 + }, { + sha: "d9c1e6de6633b8d5d100bc839bf1d23ca26e8d28" + date: "2020-11-26 21:31:44 UTC" + description: "Fix incorrect _IA storage class names" + pr_number: 5245 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 23 + deletions_count: 1 + }, { + sha: "fb4119e11999b1439669c6a43527200050dc57c6" + date: "2020-11-27 05:36:15 UTC" + description: "improve arithmetic type checking" + pr_number: 5059 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 53 + deletions_count: 38 + }, { + sha: "79020644871c64d6b48b4d01260fdabf260e264a" + date: "2020-11-27 05:43:24 UTC" + description: "support resolving program to \"any\" value" + pr_number: 5060 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 129 + deletions_count: 49 + }, { + sha: "41a9594c8e59dfb42a76b3c62d4a967b4ed00dfe" + date: "2020-11-27 06:08:37 UTC" + description: "support query/assignment of object root" + pr_number: 5064 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 70 + deletions_count: 15 + }, { + sha: "1abf8a4dbdd94eb3148f517d76bd7bc356faedd8" + date: "2020-11-27 10:11:53 UTC" + description: "add parse_grok remap function" + pr_number: 4992 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 7 + insertions_count: 292 + deletions_count: 16 + }, { + sha: "e15fb91771955896669f76c27fe7edafefb13dfe" + date: "2020-08-01 06:25:15 UTC" + description: "pip3 installed binaries" + pr_number: 3287 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "26e302b7ff3b36edef565dd45a70de71f1b878b4" + date: "2020-08-04 
05:12:22 UTC" + description: "color in not tty for tests" + pr_number: 3324 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 9 + deletions_count: 8 + }, { + sha: "9e5284a7de02f90374ddc44238484d123cac92c6" + date: "2020-08-05 02:32:14 UTC" + description: "conditional compilation for unix source" + pr_number: 3298 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "b8403c5eea29ff51df572f7a8c2085d682768744" + date: "2020-08-15 07:27:52 UTC" + description: "doc test in event/metric" + pr_number: 3461 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "5213f38a70f093d6bb41711bc7da10423704bcf4" + date: "2020-08-25 23:41:24 UTC" + description: "Fixed language on code fence in CONTRIBUTING.md" + pr_number: 3560 + scopes: ["external docs"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "63bdb57cb11b1f71cb21d1e9d0f9f4087ed00003" + date: "2020-08-26 02:29:19 UTC" + description: "Remove quiet verbose options as environment variables to stop test-shutdown failing" + pr_number: 3559 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 2 + insertions_count: 2 + deletions_count: 14 + }, { + sha: "aa5d9641f1f86308fbfdd6ff2c30b89a67c17e0e" + date: "2020-08-28 04:37:39 UTC" + description: "Rewrite LineAgg to allow passing context data and be more memory efficient" + pr_number: 3561 + scopes: ["file source"] + type: "perf" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 114 + deletions_count: 64 + }, { + sha: "ef722c3e092537984675235af98c9f5f4558617c" + date: "2020-09-10 01:55:16 UTC" + description: "remove unused imports" + pr_number: 3776 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 4 + deletions_count: 8 + }, { + sha: "5b48256ef5a3d9cfa831f0a205e9079b1f3c17f0" + date: "2020-09-18 05:31:03 UTC" + description: "Add missing inventory::submit" + pr_number: 3988 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 7 + insertions_count: 33 + deletions_count: 9 + }, { + sha: "b2125418b19c031e68ce0da804ff08ec24117f1c" + date: "2020-09-18 05:27:22 UTC" + description: "remove failing remap behavior test" + pr_number: 4004 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Jean Mertz" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "5fbcc587f2cc60894357ee5e78ce62db58f0f1f5" + date: "2020-09-22 05:27:13 UTC" + description: "Wrong inventory::submit" + pr_number: 4033 + scopes: [] + type: "chore" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "b031eadb3bebae98a418a11b245cd59a7f32c2ad" + date: "2020-09-30 18:24:05 UTC" + description: "A new internal lookup" + pr_number: 4066 + scopes: ["data model"] + type: "perf" + breaking_change: false + author: "Ana Hobden" + files_count: 41 + insertions_count: 983 + deletions_count: 214 + }, { + sha: "50fe38019570b67362584abbe3342acc6bab0217" + date: "2020-10-10 02:42:23 UTC" + description: "Add missing use" + pr_number: 4475 + scopes: ["tests"] + type: "chore" + breaking_change: false + 
author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "75d2e3a5d42590a4d39606cf98acd07c8ea1d78c" + date: "2020-10-12 06:25:25 UTC" + description: "logdna clippy and prometheus skipped field" + pr_number: 4508 + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "07176dacb6a27bd97bd5f72216274bace64ffd69" + date: "2020-10-14 23:40:52 UTC" + description: "Removed a stray Atom that snuck back in" + pr_number: 4566 + scopes: [] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "d99813e90c9668d4cfcf8d25abc329ed8eff4fb0" + date: "2020-10-22 16:02:35 UTC" + description: "Generate valid config for all components" + pr_number: 4636 + scopes: [] + type: "chore" + breaking_change: false + author: "Do Duy" + files_count: 52 + insertions_count: 791 + deletions_count: 102 + }, { + sha: "7e0c2664e32a2422b23eb854bb8373d963106450" + date: "2020-10-22 23:49:34 UTC" + description: "Renamed mongo metrics to new naming standards" + pr_number: 4699 + scopes: ["mongodb_metrics source"] + type: "enhancement" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 3 + deletions_count: 9 + }, { + sha: "c3ed69f11520b32e8dd28fe875a6785a5826447b" + date: "2020-10-25 23:21:59 UTC" + description: "Add `ConnectionOpen` gauge" + pr_number: 4681 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 174 + deletions_count: 15 + }, { + sha: "38f904ce0d08fad60c8297f74fe7890eed0d79f0" + date: "2020-10-27 02:22:19 UTC" + description: "make instrumentation only on trace" + pr_number: 4765 + scopes: ["observability"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 21 + deletions_count: 21 + }, { + sha: "b46e281459607db52c4779c9362108ae717ff47c" + date: "2020-11-05 01:10:54 UTC" + description: "Use `namespace` field in metric sinks" + pr_number: 4806 + scopes: ["data model"] + type: "enhancement" + breaking_change: true + author: "Kruno Tomola Fabro" + files_count: 21 + insertions_count: 396 + deletions_count: 308 + }, { + sha: "a87b132e6c00c56467a261a42a52be564962dc54" + date: "2020-11-05 02:01:22 UTC" + description: "Use `namespace` field in metric sources" + pr_number: 4833 + scopes: ["data model"] + type: "enhancement" + breaking_change: true + author: "Kruno Tomola Fabro" + files_count: 8 + insertions_count: 323 + deletions_count: 321 + }, { + sha: "bc2533d07c6c2b5828040819f625289491205ac4" + date: "2020-11-13 11:31:46 UTC" + description: "Prometheus uses cumulative histogram" + pr_number: 5001 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "Do Duy" + files_count: 2 + insertions_count: 23 + deletions_count: 7 + }, { + sha: "6d90a084e978ffb638fb3d3cbc105f7e0c71f479" + date: "2020-11-16 09:11:24 UTC" + description: "Extend `Resource` to sources " + pr_number: 4996 + scopes: ["shutdown"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 16 + insertions_count: 432 + deletions_count: 121 + }, { + sha: "2d4fa207f460de5f1c6f08bad05bd0c3aa3a36d1" + date: "2020-11-18 02:28:58 UTC" + description: "Beautify reports of conflicting `Resource` usage" + pr_number: 5048 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola 
Fabro" + files_count: 4 + insertions_count: 54 + deletions_count: 24 + }, { + sha: "e016c713b5c37f9ef5f22f498441c6e6e2827354" + date: "2020-11-19 03:31:59 UTC" + description: "add _total suffix to events" + pr_number: 5098 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 5 + insertions_count: 18 + deletions_count: 18 + }, { + sha: "e769bccd78bbfa3098282b85567dad0e4f72763e" + date: "2020-11-21 02:07:48 UTC" + description: "Emit `FileOpen` in `file` sink and source" + pr_number: 4922 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 314 + deletions_count: 259 + }, { + sha: "803c68c031e5872876e1167c428cd41358123d64" + date: "2020-11-24 01:27:59 UTC" + description: "Incorrect Log Level Message" + pr_number: 5183 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Do Duy" + files_count: 1 + insertions_count: 13 + deletions_count: 17 + }, { + sha: "8c8725c1e4c7c2a2e7b03ebf764e1112d9c63d7d" + date: "2020-11-28 01:46:05 UTC" + description: "add ip address remap functions" + pr_number: 5145 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 12 + insertions_count: 868 + deletions_count: 1 + }, { + sha: "24155d4450744b2b2110446163cbd050df08685e" + date: "2020-11-28 02:07:03 UTC" + description: "Handle unexpected errors in ARC controller" + pr_number: 5267 + scopes: ["networking"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 18 + deletions_count: 7 + }, { + sha: "ba910afc6189d3d6e5c2a7844023ee53a64a624c" + date: "2020-11-28 03:39:11 UTC" + description: "New prometheus_remote_write source" + pr_number: 5144 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 26 + insertions_count: 981 + deletions_count: 504 + }, { + sha: "295a630b816aa19e996ac42bebeb93369adae4c2" + date: "2020-11-28 16:55:20 UTC" + description: "Add `exists` remap function" + pr_number: 5195 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 6 + insertions_count: 162 + deletions_count: 1 + }, { + sha: "9e51f2e3795d08598118c7bb6bf475b352b172b1" + date: "2020-11-29 07:21:46 UTC" + description: "Allow JSON and YAML config formats in addition to TOML" + pr_number: 5005 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 22 + insertions_count: 792 + deletions_count: 189 + }, { + sha: "9bddcb233a6ec7814e685d6060f7e0ffd37d71ad" + date: "2020-11-30 01:35:51 UTC" + description: "Add compact remap function" + pr_number: 5231 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 5 + insertions_count: 525 + deletions_count: 0 + }, { + sha: "84aebc078678549e1933d0c6c6505f3f464a6f40" + date: "2020-12-01 04:44:31 UTC" + description: "fix integration tests list" + pr_number: 5293 + scopes: ["ci", "tests"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 27 + deletions_count: 9 + }, { + sha: "fdb77ff020c7f0487c7bd3d39d78c5f8a56ef2ac" + date: "2020-11-30 22:36:19 UTC" + description: "Update release Windows task" + pr_number: 5295 + scopes: ["ci"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 39 + deletions_count: 
6 + }, { + sha: "dfc18c17c149a5e7b1a7cd0697ef2b7085d99272" + date: "2020-12-01 00:01:49 UTC" + description: "Update release-docker task" + pr_number: 5299 + scopes: ["ci"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 3 + deletions_count: 2 + }, { + sha: "018091fc4b7c569d8d9b32c3d3d06d279eca2f87" + date: "2020-12-01 05:49:44 UTC" + description: "add assert remap function" + pr_number: 5073 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 6 + insertions_count: 199 + deletions_count: 0 + }, { + sha: "529581de69d431c9d0cbc59d152b5858d479e68f" + date: "2020-12-01 15:43:25 UTC" + description: "Enable TLS subscription connections in vector top" + pr_number: 5296 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Steve Hall" + files_count: 3 + insertions_count: 6 + deletions_count: 3 + }, { + sha: "12a9cc325f1f6c86ab802f3f6deda29079c857bc" + date: "2020-12-02 01:26:57 UTC" + description: "Add missing remap functions" + pr_number: 5312 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 9 + insertions_count: 758 + deletions_count: 22 + }, { + sha: "c8352140d597c6623b5c3486d39a27b9191c204e" + date: "2020-12-01 20:57:20 UTC" + description: "Change packages to have full versions and architectures" + pr_number: 5303 + scopes: ["ci"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 20 + insertions_count: 274 + deletions_count: 189 + }, { + sha: "10e42732a6f1b14d09208d5110634e9718d4cee9" + date: "2020-12-02 03:28:19 UTC" + description: "introduce encoding schema and pulsar avro schema" + pr_number: 5021 + scopes: ["pulsar sink"] + type: "enhancement" + breaking_change: false + author: "Alexandre DUVAL" + files_count: 10 + insertions_count: 284 + deletions_count: 16 + }] + } + "0.12.0": { + date: "2021-02-11" + codename: "Vector Remap Language" + description: """ + The Vector team is pleased to announce 0.12.0. This release introduces the [**Vector Remap Language**](https://vector.dev/docs/reference/vrl/), + an expression-oriented language designed for transforming observability data (logs and metrics) in a + [safe](https://vector.dev/docs/reference/vrl/#safety) and [performant](https://vector.dev/docs/reference/vrl/#performance) manner. + + Check out the [VRL announcement post](https://vector.dev/blog/vector-remap-language) for more details as well as the + [highlights](#highlights) and [changelog](#changelog) for a complete list of changes in 0.12.0. + """ + whats_next: [{ + title: "Vector's own observability" + description: """ + We will be working to improve Vector's own observability with high-quality internal metrics, logs, + CLI tools, and dashboards. + """ + }, { + title: "Improved Vector to Vector communication" + description: """ + The `vector` source and sink currently leverage the TCP protocol to transmit data. While this has + served users well, it presents operational challenges for large-scale deployments of Vector. Because + HTTP is widely used and easy to integrate into platforms like Kubernetes, we will be moving these + components to the HTTP protocol. + """ + }, { + title: "Schema support" + description: """ + With the inclusion of [type-safety in VRL](https://vector.dev/docs/reference/vrl/#type-safety), we will be working to officially
This will be an opt-in feature that will provide + Vector with type information about your data, making Vector type safe from end to end and further + raising confidence that Vector will operate properly post-deployment. + """ + }] + commits: [{ + sha: "5ae2247d946733fc0f53d03d12afe83a2998cb7e" + date: "2020-12-02 19:50:10 UTC" + description: "Add UNIX datagram mode" + pr_number: 5298 + scopes: ["socket source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 272 + deletions_count: 64 + }, { + sha: "cb73ba0bc7d8bcb80578872181c7e539986060ea" + date: "2020-12-03 21:38:10 UTC" + description: "support path queries for variables" + pr_number: 5277 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 72 + insertions_count: 2230 + deletions_count: 449 + }, { + sha: "e19264990fc8f5b75d55f52c49460e8893690e01" + date: "2020-12-03 22:15:25 UTC" + description: "add support for literal arrays" + pr_number: 5278 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 6 + insertions_count: 145 + deletions_count: 14 + }, { + sha: "e9371ac1de252bb667a8c6d833dbd0ecfdb882b3" + date: "2020-12-03 22:48:07 UTC" + description: "accept array of arguments for function parameters" + pr_number: 5283 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 12 + insertions_count: 303 + deletions_count: 40 + }, { + sha: "c920bc7c95745e4d369e0c25fe38cb1ca071ee2a" + date: "2020-12-04 19:14:56 UTC" + description: "add `redact` function" + pr_number: 5297 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 235 + deletions_count: 0 + }, { + sha: "687456cc3a067905e8349df4c0ab557795105406" + date: "2020-12-05 00:39:56 UTC" + description: "Helm chart for deploying Vector with an aggregator role" + pr_number: 4404 + scopes: ["administration"] + type: "feat" + breaking_change: false + author: "MOZGIII" + files_count: 32 + insertions_count: 1262 + deletions_count: 48 + }, { + sha: "a5d67b3038b7d90aee13284bccad4d8db8e6f411" + date: "2020-12-05 04:34:40 UTC" + description: "Support exposing internal metrics out of the box in our Helm charts" + pr_number: 4854 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 24 + insertions_count: 767 + deletions_count: 8 + }, { + sha: "d4b7b306fe0af1354891920ff039917b61851849" + date: "2020-12-05 09:17:05 UTC" + description: "Add exclusion label at vector-aggregator" + pr_number: 5388 + scopes: ["kubernetes platform"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "2593d91b729e014abc4c8d2a79f6d4128e82baf6" + date: "2020-12-05 07:34:25 UTC" + description: "Implement TCP keepalive `time` configuration" + pr_number: 5157 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Pablo Sichert" + files_count: 32 + insertions_count: 313 + deletions_count: 82 + }, { + sha: "f25d0420afb7e6c5321d62d75bc6c3ca28930145" + date: "2020-12-08 21:18:37 UTC" + description: "fixing Compression parsing with tests" + pr_number: 5429 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 101 + deletions_count: 29 + }, { + sha: "8b3d59562d5665c60ecf255b2cadc68579acea94" + date: "2020-12-09 06:45:47 UTC"
+ description: "require features for generate_configfile test" + pr_number: 5427 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 9 + deletions_count: 9 + }, { + sha: "7fd382fd459986e5241963bb7fb1693fba12cf0a" + date: "2020-12-08 21:12:30 UTC" + description: "Enable excluding Docker containers via configuration" + pr_number: 5301 + scopes: ["docker_logs source"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 2 + insertions_count: 99 + deletions_count: 28 + }, { + sha: "3a48a0a47dd1e1b59113041a96cdced29937cc19" + date: "2020-12-10 21:59:42 UTC" + description: "Remove default `encoding.codec` where appropriate" + pr_number: 5281 + scopes: ["codec"] + type: "enhancement" + breaking_change: true + author: "Kirill Fomichev" + files_count: 18 + insertions_count: 242 + deletions_count: 244 + }, { + sha: "b1bc779d1fc96e06ddc34b11585fc1651ce2da86" + date: "2020-12-11 03:52:44 UTC" + description: "add initial \"trl\" CLI" + pr_number: 5450 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 245 + deletions_count: 1 + }, { + sha: "e04d32d0dfd513b95880ef2c1a7c8d362aac7317" + date: "2020-12-11 04:02:01 UTC" + description: "support multiline expressions" + pr_number: 5309 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 2 + insertions_count: 11 + deletions_count: 1 + }, { + sha: "b57f4d96a74da49c091aaf66d3974f5bd4330fdc" + date: "2020-12-10 21:51:52 UTC" + description: "Export metadata to remote_write" + pr_number: 5445 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 441 + deletions_count: 151 + }, { + sha: "feb0c0e7c6f9c21b4d1021076d9fec651ceba200" + date: "2020-12-11 12:26:59 UTC" + description: "Fix build issue caused by macro removal" + pr_number: 5480 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 13 + deletions_count: 1 + }, { + sha: "f38fb43999a5bb0896ac55c8f901d70db3af0ab8" + date: "2020-12-11 19:46:57 UTC" + description: "New internal_logs source" + pr_number: 5346 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 404 + deletions_count: 26 + }, { + sha: "44ecc2bf4c90f8903b858a681e32db0dbbaedda3" + date: "2020-12-13 02:45:52 UTC" + description: "Add support for basic auth credendtials in URL for more sinks" + pr_number: 5379 + scopes: ["auth"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 10 + insertions_count: 247 + deletions_count: 181 + }, { + sha: "565bc52ad0ba535a38e6a1f6a86575db205ffa36" + date: "2020-12-13 19:54:38 UTC" + description: "add integer division" + pr_number: 5353 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 6 + insertions_count: 42 + deletions_count: 3 + }, { + sha: "b40750a92083df2d33cfbd9deb58d75fe357fc20" + date: "2020-12-13 21:52:35 UTC" + description: "add initial CLI REPL support" + pr_number: 5490 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 7 + insertions_count: 303 + deletions_count: 68 + }, { + sha: "6e4b7a91e9119763ad1e803eba76523144ef8082" + date: "2020-12-14 01:19:02 UTC" + description: "allow multiple statements in boolean conditional" + 
pr_number: 5438 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 4 + insertions_count: 46 + deletions_count: 10 + }, { + sha: "da297f1c8fb2f0ce1c779cab38649c57e7091411" + date: "2020-12-14 07:41:46 UTC" + description: "Add missing methods to Subscriber forwarder" + pr_number: 5529 + scopes: ["metrics"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 10 + deletions_count: 0 + }, { + sha: "1d3b7c1814dc33a29126e0ecbc217d54646b04a8" + date: "2020-12-15 17:08:01 UTC" + description: "Correct the prometheus format parsing at K8s E2E tests for the metrics pipeline" + pr_number: 5540 + scopes: ["kubernetes platform"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 3 + insertions_count: 28 + deletions_count: 6 + }, { + sha: "38f0e7c90ff6d760993e0d0929c63215b749ab71" + date: "2020-12-17 08:19:21 UTC" + description: "Add host and TLS configurations" + pr_number: 5532 + scopes: ["docker_logs source"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 4 + insertions_count: 150 + deletions_count: 31 + }, { + sha: "171301191c8d5bc04409e7c3bf2b516e360c8f8b" + date: "2020-12-17 04:47:04 UTC" + description: "Implement improvements to the Pod recreation on Helm release upgrades" + pr_number: 5525 + scopes: ["kubernetes platform"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 10 + insertions_count: 96 + deletions_count: 12 + }, { + sha: "bf29d061c79d755e89069a4aa16ac1e0d37c97c8" + date: "2020-12-17 21:15:30 UTC" + description: "add remap functions to CLI" + pr_number: 5531 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 11 + insertions_count: 146 + deletions_count: 73 + }, { + sha: "535d898c8c9e337942cc3a2d772174382abc4f66" + date: "2020-12-17 23:35:25 UTC" + description: "track inner container type definitions" + pr_number: 5375 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 51 + insertions_count: 414 + deletions_count: 111 + }, { + sha: "a646ad908271ef6b81b16f769dc05fe20c23952f" + date: "2020-12-18 06:11:50 UTC" + description: "add `ok` error handling function" + pr_number: 5469 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 111 + deletions_count: 1 + }, { + sha: "4b33ff0b9eeb8e112dbf7e0cc77c1b52fa11ca19" + date: "2020-12-18 06:50:28 UTC" + description: "add support for literal maps" + pr_number: 5279 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 16 + insertions_count: 390 + deletions_count: 181 + }, { + sha: "e46bf469aacf4616cbda578c9511427535be7509" + date: "2020-12-18 10:49:37 UTC" + description: "Expose host metrics in our Helm charts out of the box" + pr_number: 5100 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 8 + insertions_count: 185 + deletions_count: 7 + }, { + sha: "01e726af0e4d24e8a45345441420397e5a9e8803" + date: "2020-12-18 11:36:37 UTC" + description: "Add global `healthchecks` option" + pr_number: 5528 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 13 + insertions_count: 153 + deletions_count: 71 + }, { + sha: "9f760f2c7d57ff37fe2c0b7bfabda681b334e832" + date: "2020-12-18 12:41:49 UTC" + description: "Emit `processed_events_total`
in topology for transforms" + pr_number: 5492 + scopes: ["observability"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 60 + insertions_count: 47 + deletions_count: 410 + }, { + sha: "6389e8866af00e72e36b7e4d91d42da7c908f7a9" + date: "2020-12-18 18:11:02 UTC" + description: "add `parse_aws_alb_log` function" + pr_number: 5489 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 7 + insertions_count: 403 + deletions_count: 0 + }, { + sha: "e7316773675d04a5bf13bb20b67e2b88151b17df" + date: "2020-12-18 18:32:07 UTC" + description: "adjust deny.toml" + pr_number: 5552 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 27 + deletions_count: 30 + }, { + sha: "5f97129f8e2f5bc0e86f4e85d2d4dbf7cb2c2a09" + date: "2020-12-18 22:12:18 UTC" + description: "improve error messages, part 1" + pr_number: 5477 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 100 + deletions_count: 453 + }, { + sha: "5488be0a6252efe152bde9fd4babbffb7970fb75" + date: "2020-12-19 02:03:10 UTC" + description: "add `parse_aws_vpc_flow_log` function" + pr_number: 5504 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 7 + insertions_count: 365 + deletions_count: 1 + }, { + sha: "c62b51511f61379c7e418c5a3de6e8c5ac687bf3" + date: "2020-12-19 02:22:03 UTC" + description: "add support for data streams" + pr_number: 5126 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Spencer Gilbert" + files_count: 3 + insertions_count: 88 + deletions_count: 6 + }, { + sha: "7cc42beec5d3e859f4278fdeca599bbf48b67189" + date: "2020-12-19 00:33:55 UTC" + description: "Enable format selection for generator source" + pr_number: 5399 + scopes: ["generator source"] + type: "enhancement" + breaking_change: true + author: "Luc Perkins" + files_count: 11 + insertions_count: 434 + deletions_count: 102 + }, { + sha: "c12020dbccb7b04ff3c3a2165fb50d47cb9baa7a" + date: "2020-12-19 05:25:31 UTC" + description: "Add includes function for arrays" + pr_number: 5541 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 225 + deletions_count: 1 + }, { + sha: "a3f086cb6035455d1a28a91a44597f383b7c7785" + date: "2020-12-19 23:04:33 UTC" + description: "enable remap to work with metric events" + pr_number: 5475 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 7 + insertions_count: 841 + deletions_count: 368 + }, { + sha: "91f77ef44a4dfa182c27cf3483a878ad576d8757" + date: "2020-12-20 02:39:07 UTC" + description: "fix set metric reset on flush period" + pr_number: 5258 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 146 + deletions_count: 77 + }, { + sha: "7b0424cbf22b88be759bbaf84110f887a429ee8a" + date: "2020-12-20 00:53:10 UTC" + description: "tenant_id is templatable on loki sink" + pr_number: 5629 + scopes: ["documentation"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "8df93bc21eddce7f8be6ab3d3f4d12bec96fdc5b" + date: "2020-12-20 07:47:45 UTC" + description: "Fix type definition for `includes()`" + pr_number: 
5628 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "47734445cb07cf104bc0bebd8102075b7953962b" + date: "2020-12-21 23:13:21 UTC" + description: "Add to_syslog_level function for parsing Syslog levels into strings" + pr_number: 5503 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 200 + deletions_count: 0 + }, { + sha: "b31cb7ec71765928194d14fd88f9718a3adf5f80" + date: "2020-12-22 09:56:00 UTC" + description: "docs for remap transform metrics" + pr_number: 5626 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 9 + deletions_count: 2 + }, { + sha: "ecc26afd3f0d71adb27cb192fa2152862696f90e" + date: "2020-12-22 04:02:33 UTC" + description: "podman pod stop/rm don't need a --name option" + pr_number: 5669 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 25 + deletions_count: 25 + }, { + sha: "b38ce3daef8e104aa518a9f5cb73c3794761de78" + date: "2020-12-22 02:54:50 UTC" + description: "Add to_syslog_severity function for Syslog log level keywords" + pr_number: 5505 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 4 + insertions_count: 181 + deletions_count: 0 + }, { + sha: "a894cf7cc5370f0d7176dc11a7b93838a045a2c9" + date: "2020-12-22 19:36:50 UTC" + description: "Allow owning the same port with different protocols" + pr_number: 5625 + scopes: ["networking"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 10 + insertions_count: 67 + deletions_count: 22 + }, { + sha: "284e18f8b50de17a0673ebac8d6d8726fc325870" + date: "2020-12-23 07:24:25 UTC" + description: "Print warning on encoding error" + pr_number: 5651 + scopes: ["codec", "observability"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 7 + insertions_count: 110 + deletions_count: 120 + }, { + sha: "ce9185eea202ceb5963a1e24b8f05d33600fcc16" + date: "2020-12-23 04:55:05 UTC" + description: "add encode_json function" + pr_number: 5653 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 218 + deletions_count: 1 + }, { + sha: "ee27b6835b44eeb753c50a89120fce34e89837f1" + date: "2020-12-23 16:43:34 UTC" + description: "handle incorrect int sequences as strings" + pr_number: 5683 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 21 + deletions_count: 5 + }, { + sha: "5a2dffc230b183c1cbfe3f9828452e9b1e745fc5" + date: "2020-12-23 23:51:49 UTC" + description: "fix tests for fake journal" + pr_number: 5694 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "ef45085f1c217fea58c9c608314baeae596945e2" + date: "2020-12-24 08:51:29 UTC" + description: "Support TLS configuration for Humio sinks" + pr_number: 5692 + scopes: ["networking", "security", "humio sink"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 3 + insertions_count: 17 + deletions_count: 2 + }, { + sha: "740624539d5a543f34ff9c1c6a692238055bb5eb" + date: "2020-12-24 06:34:24 UTC" + description: "File source metrics" + 
pr_number: 5645 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 23 + insertions_count: 1451 + deletions_count: 674 + }, { + sha: "263616f958daf64c47986e32f8bc9fa0cb6a32a9" + date: "2020-12-26 01:28:49 UTC" + description: "Add hostname to Docker source" + pr_number: 5690 + scopes: ["docker_logs source"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 3 + insertions_count: 40 + deletions_count: 6 + }, { + sha: "63e49917bb75c176a27bf572240431d8ad926701" + date: "2020-12-25 11:12:24 UTC" + description: "Add JSON format" + pr_number: 5630 + scopes: ["generator source"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 3 + insertions_count: 56 + deletions_count: 4 + }, { + sha: "bbb5fb66e741496490ff6460e61679d75fc244f0" + date: "2020-12-27 01:14:25 UTC" + description: "Add RFC 3164 format" + pr_number: 5631 + scopes: ["generator source"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 3 + insertions_count: 50 + deletions_count: 6 + }, { + sha: "6c4151f764b5ea4ad21181a5d2500ae93eeec53c" + date: "2020-12-28 06:25:36 UTC" + description: "Add `gzip` & `deflate` decompression" + pr_number: 5638 + scopes: ["http source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 10 + insertions_count: 221 + deletions_count: 65 + }, { + sha: "194353149be8652e82a7bcb381c7166d675e46a8" + date: "2020-12-29 00:26:06 UTC" + description: "`componentByName` query" + pr_number: 5707 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 8 + insertions_count: 101 + deletions_count: 12 + }, { + sha: "8877d141963b0502a2bbe878eddfd960b533c1d3" + date: "2020-12-28 23:34:38 UTC" + description: "lower handle http req log to debug" + pr_number: 5705 + scopes: ["http source"] + type: "fix" + breaking_change: false + author: "Spencer Gilbert" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "881ff658acc71703ea2a589bd69518fac1f0881e" + date: "2020-12-28 23:38:59 UTC" + description: "Make internal metrics pass promtools check" + pr_number: 5743 + scopes: ["internal_metrics source"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "3d0e1cfd7717dc54b2f9fc73384e6b2d62b4a999" + date: "2020-12-29 09:05:02 UTC" + description: "Better tracing/tests on rdkafka options." 
+ pr_number: 5652 + scopes: ["kafka sink"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 108 + deletions_count: 39 + }, { + sha: "99f67cc19c23d7289301bdcd525e2c586bb0778e" + date: "2020-12-29 09:05:09 UTC" + description: "Fixup version script" + pr_number: 5752 + scopes: [] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "977c0106ecb7923eda630b88f3ab42b7c510f960" + date: "2020-12-30 01:53:43 UTC" + description: "add `parse_regex` and `parse_regex_all` remap functions" + pr_number: 5594 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 8 + insertions_count: 455 + deletions_count: 1 + }, { + sha: "acf9cfc375d2421a091e47d33e7ab8843573a4ea" + date: "2021-01-01 22:42:39 UTC" + description: "Adds script for generating README.md for Agent role helm-chart" + pr_number: 5590 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "Ian Henry" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "a07132a9496f74891b7868e5997da099e8dea43b" + date: "2021-01-03 09:46:19 UTC" + description: "do not use static year in tests" + pr_number: 5803 + scopes: ["syslog source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 12 + deletions_count: 10 + }, { + sha: "ad8909ca968ba185690c31fa6e4e510855bdd7ae" + date: "2021-01-03 02:43:46 UTC" + description: "Build ARMv7 images for all supported platforms" + pr_number: 5787 + scopes: ["ci"] + type: "enhancement" + breaking_change: false + author: "Dan Norris" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "18b22b0c0166f209eb6087a2302971304add8310" + date: "2021-01-04 06:53:54 UTC" + description: "add `parse_aws_cloudwatch_log_subscription_message` function" + pr_number: 5764 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 12 + insertions_count: 302 + deletions_count: 29 + }, { + sha: "26f730d62f9331b7c295ed71f3344afeca751849" + date: "2021-01-03 22:28:31 UTC" + description: "Trivial fixes for metrics batch buffer" + pr_number: 5699 + scopes: ["metrics"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 89 + deletions_count: 62 + }, { + sha: "242f4db6dda14f125efdc948072b8c44d8e6b08e" + date: "2021-01-03 20:38:55 UTC" + description: "Enable casting timestamps into integers" + pr_number: 5753 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 2 + insertions_count: 66 + deletions_count: 114 + }, { + sha: "6b97409f3298ad99c0d7d71c25dbb1e17930116a" + date: "2021-01-04 07:55:44 UTC" + description: "allow send metrics to kafka" + pr_number: 5712 + scopes: ["kafka sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 90 + deletions_count: 23 + }, { + sha: "a1f0d6b328bd053e4aea497332ece7ebc8ce8fe1" + date: "2021-01-04 08:03:19 UTC" + description: "Correct misspelling" + pr_number: 5826 + scopes: ["internal docs"] + type: "fix" + breaking_change: false + author: "Jose Diaz-Gonzalez" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "3b415d160de76e1ee4b600be5b475b922d38c221" + date: "2021-01-05 00:49:11 UTC" + description: "fix features for lib/shared" + pr_number: 5829 + scopes: 
["deps"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9691d4f6392cbd5142aa30fe46877f8e81777b21" + date: "2021-01-05 01:16:49 UTC" + description: "Adds docker push guard back into CI scripts" + pr_number: 5848 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Ian Henry" + files_count: 2 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "c6a8c3e17618d97cade170cc030b649982d48e36" + date: "2021-01-05 21:36:54 UTC" + description: "added `parse_key_value` remap function" + pr_number: 5760 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "FungusHumungus" + files_count: 5 + insertions_count: 526 + deletions_count: 0 + }, { + sha: "d612b3caaaadba1790902efaf5addf5347b7be81" + date: "2021-01-06 01:24:06 UTC" + description: "correctly handle nested map insertions" + pr_number: 5834 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Jean Mertz" + files_count: 2 + insertions_count: 25 + deletions_count: 13 + }, { + sha: "948599795653f3b5894fa58046b309cb3ccc8da1" + date: "2021-01-06 01:24:50 UTC" + description: "non-boolean if conditional compile-time error" + pr_number: 5835 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 3 + insertions_count: 29 + deletions_count: 18 + }, { + sha: "a64d67156913b2702b1601f5011c9a279ac377c0" + date: "2021-01-07 03:07:18 UTC" + description: "Relay connections (components, initially)" + pr_number: 5747 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 11 + insertions_count: 652 + deletions_count: 83 + }, { + sha: "2daa2856fea746879eaa98645e74cd6018a77b0e" + date: "2021-01-07 05:45:14 UTC" + description: "Relay-compatible connections for sources/transforms/sinks" + pr_number: 5880 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 7 + insertions_count: 643 + deletions_count: 119 + }, { + sha: "b85cde4eff1ef790e78b6ae6ba83fc4aabc5624f" + date: "2021-01-07 05:58:44 UTC" + description: "Add is_nullish function to VRL" + pr_number: 5754 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 6 + insertions_count: 249 + deletions_count: 1 + }, { + sha: "b2e8c2680a5aae1828a0dfbfba7fee1fb8d6f403" + date: "2021-01-07 20:44:19 UTC" + description: "Fix HTTP and HTTPS URL" + pr_number: 5882 + scopes: ["docker_logs source"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 2 + insertions_count: 34 + deletions_count: 6 + }, { + sha: "5b5222c25b25552a139cf7b0349dda280bb790fd" + date: "2021-01-07 07:20:37 UTC" + description: "Move VRL CLI into the Vector CLI" + pr_number: 5756 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 13 + insertions_count: 231 + deletions_count: 156 + }, { + sha: "7458ebcd41bb7201ab71c560cf6798bd43b6237f" + date: "2021-01-07 20:26:48 UTC" + description: "add error coalescing operator" + pr_number: 5830 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 5 + insertions_count: 100 + deletions_count: 3 + }, { + sha: "7844bb7449a1981ff4dbe2e7745064182f407eb8" + date: "2021-01-07 20:45:37 UTC" + description: "support \"abort on error\" bang-function-calls" + pr_number: 5876 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + 
files_count: 5 + insertions_count: 97 + deletions_count: 17 + }, { + sha: "19a90e8f1cba1642cfc9b97e8598c6a9ef310372" + date: "2021-01-07 20:16:23 UTC" + description: "Addressed minor spelling error in variable" + pr_number: 5899 + scopes: [] + type: "chore" + breaking_change: false + author: "James Turnbull" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "577edaf502d91b49f265b9853eb0d9dec10b3065" + date: "2021-01-08 23:39:02 UTC" + description: "error-return-value error handling pattern" + pr_number: 5911 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 12 + insertions_count: 324 + deletions_count: 62 + }, { + sha: "411100307f44d1d73933b0080dca1da254cd914d" + date: "2021-01-08 18:55:35 UTC" + description: "allow user configuration of the `ignore_not_found` option" + pr_number: 5616 + scopes: ["file source"] + type: "feat" + breaking_change: false + author: "Spencer Gilbert" + files_count: 2 + insertions_count: 9 + deletions_count: 1 + }, { + sha: "08bb58f02b536a266d92b395f4785335debf8e64" + date: "2021-01-08 20:58:10 UTC" + description: "Handle early return from influxdb create database" + pr_number: 5928 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 36 + deletions_count: 18 + }, { + sha: "1d99cefa0ebd5432a39fc38539c0a84ca6ff2fd7" + date: "2021-01-09 08:30:32 UTC" + description: "Fix prometheus exposition format" + pr_number: 5914 + scopes: ["prometheus_exporter sink"] + type: "fix" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 2 + insertions_count: 34 + deletions_count: 28 + }, { + sha: "dbcef9cedf9960169e0564ca6ed08808c4c9fc74" + date: "2021-01-09 21:51:58 UTC" + description: "bare variable names" + pr_number: 5856 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Jean Mertz" + files_count: 6 + insertions_count: 210 + deletions_count: 120 + }, { + sha: "808d0a61b647d13f4f9d7b90baa499cda97d15f6" + date: "2021-01-11 04:03:35 UTC" + description: "Avoid alias `in_flight_limit`" + pr_number: 5950 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 32 + deletions_count: 7 + }, { + sha: "df7136e69a37be36d6217116e1beec31cb6244b3" + date: "2021-01-11 04:40:15 UTC" + description: "Add to_syslog_facility function for parsing syslog facilities into strings" + pr_number: 5770 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Josh Bradley" + files_count: 6 + insertions_count: 318 + deletions_count: 1 + }, { + sha: "02952782aee141597d0ba15df0873c6e482d68a4" + date: "2021-01-11 19:00:10 UTC" + description: "Add to_unix_timestamp function" + pr_number: 5767 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 275 + deletions_count: 0 + }, { + sha: "4c6af99cc8a485b219d0d19c47a2fc4896d50ab6" + date: "2021-01-11 22:10:28 UTC" + description: "Rename tokenize to parse_tokens" + pr_number: 5983 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 30 + deletions_count: 30 + }, { + sha: "69df85b59e3425214113ad8dcaba37d13c4ac78f" + date: "2021-01-11 23:22:14 UTC" + description: "Add push and append functions" + pr_number: 5750 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 8 + insertions_count: 419 + 
deletions_count: 1 + }, { + sha: "1b9880c704d34cbe173feb2be213c11a0ed296af" + date: "2021-01-12 01:16:07 UTC" + description: "Add base64 encoding and decoding functions" + pr_number: 5768 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 10 + insertions_count: 367 + deletions_count: 1 + }, { + sha: "fece63629576cb394912c13998dd96af6e1826c5" + date: "2021-01-12 01:32:12 UTC" + description: "Make the redact function fallible" + pr_number: 5933 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Luc Perkins" + files_count: 1 + insertions_count: 83 + deletions_count: 2 + }, { + sha: "66674728a2bd6a66d37580105ab9e9242f23d139" + date: "2021-01-12 18:15:34 UTC" + description: "Set `core_threads` for `merge_and_fork`" + pr_number: 5968 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "fa541ab35ca5132f5ff357034d71cfad05b935a5" + date: "2021-01-13 00:47:54 UTC" + description: "remove the check_fields default for conditions" + pr_number: 5978 + scopes: ["config"] + type: "enhancement" + breaking_change: true + author: "FungusHumungus" + files_count: 7 + insertions_count: 47 + deletions_count: 25 + }, { + sha: "d29319746634c002c8c05ef0316826be1c55733b" + date: "2021-01-13 18:33:56 UTC" + description: "Fix remove_label_fields" + pr_number: 5974 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 1 + insertions_count: 6 + deletions_count: 2 + }, { + sha: "30642958e7835604f2b18022a898cb3136c065c2" + date: "2021-01-13 19:53:55 UTC" + description: "Fix transmission of counter metrics" + pr_number: 5922 + scopes: ["influxdb_metrics sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 137 + deletions_count: 53 + }, { + sha: "28155e37848feab7ab59fb8fe7a0818c5dce78b3" + date: "2021-01-14 03:30:29 UTC" + description: "Adjusting pod security policy to allow vector-agent to read host logs" + pr_number: 6019 + scopes: ["kubernetes platform"] + type: "fix" + breaking_change: false + author: "Thor Anker Kvisgård Lange" + files_count: 1 + insertions_count: 5 + deletions_count: 3 + }, { + sha: "e784d59040153f84cee43df429832f55c36f8681" + date: "2021-01-14 06:40:14 UTC" + description: "correct start/stop instructions" + pr_number: 6029 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "281db2bb0b09b7851e624065be27ae9c4a992a63" + date: "2021-01-15 01:19:47 UTC" + description: "Adjusting to adhere to linting rules used by ct" + pr_number: 6021 + scopes: ["kubernetes platform"] + type: "fix" + breaking_change: false + author: "Thor Anker Kvisgård Lange" + files_count: 5 + insertions_count: 9 + deletions_count: 9 + }, { + sha: "36adac70be243988803f9fe10580f06a8818cf3c" + date: "2021-01-15 00:57:13 UTC" + description: "allow comments between multiline statements" + pr_number: 6036 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 3 + insertions_count: 35 + deletions_count: 3 + }, { + sha: "50ec1abe557ea78514da64aa2f7af2d6562eb988" + date: "2021-01-15 08:44:02 UTC" + description: "Allow passing component configs as YAML in Helm charts" + pr_number: 5673 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 30 + 
insertions_count: 1593 + deletions_count: 170 + }, { + sha: "c750955256a7a07488b98df542730113c296f299" + date: "2021-01-15 08:44:35 UTC" + description: "Rework defaults and a minimal scraping interval value" + pr_number: 5682 + scopes: ["internal_metrics source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 13 + deletions_count: 24 + }, { + sha: "7fa399f6ebfac4885ca2158fe76e8b427e424d0d" + date: "2021-01-14 22:58:23 UTC" + description: "Open docs URL for help in VRL REPL" + pr_number: 5926 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 3 + insertions_count: 60 + deletions_count: 10 + }, { + sha: "ecf0537e8fdf0a3d19fb4efc145dc431b3937355" + date: "2021-01-15 08:55:34 UTC" + description: "Pretty print functions list in VRL REPL" + pr_number: 5935 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 3 + insertions_count: 47 + deletions_count: 11 + }, { + sha: "b7e1234ab24aa33c155f093ed7a8e0f3a08c934b" + date: "2021-01-15 19:03:44 UTC" + description: "Support X-Scope-OrgID header" + pr_number: 5987 + scopes: ["prometheus_remote_write sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 4 + insertions_count: 210 + deletions_count: 41 + }, { + sha: "0f7b6003aa6a7cb7fb37cc9f1eda2f54771caabc" + date: "2021-01-16 05:07:06 UTC" + description: "deprecate `check_fields` conditions" + pr_number: 6037 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 26 + insertions_count: 893 + deletions_count: 583 + }, { + sha: "158c7d78d0462737caeac9f7cc34ab0ea3ef1b21" + date: "2021-01-15 23:04:48 UTC" + description: "Add charset parameter to encode_base64 function" + pr_number: 6065 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 3 + insertions_count: 147 + deletions_count: 34 + }, { + sha: "b5835aa3c6c30f7ff37097bfde96ddaebd938535" + date: "2021-01-16 09:58:36 UTC" + description: "add tls support" + pr_number: 6078 + scopes: ["prometheus_exporter sink"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 6 + insertions_count: 121 + deletions_count: 54 + }, { + sha: "a4211e5a498980d0639e7178621589f98b096fcc" + date: "2021-01-16 03:36:10 UTC" + description: "Capture initialization logs" + pr_number: 6014 + scopes: ["internal_logs source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 106 + deletions_count: 35 + }, { + sha: "05e7359b139a942fb285e932a996fa4fdf48c1c3" + date: "2021-01-16 23:38:05 UTC" + description: "New `get_env_var` Remap function" + pr_number: 6017 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Duy Do" + files_count: 5 + insertions_count: 131 + deletions_count: 1 + }, { + sha: "9089bdfcc24ed23ae36c9b3faf4773555bdeb900" + date: "2021-01-18 00:31:42 UTC" + description: "Trigger `leveldb` compact after certain amount of deleted bytes" + pr_number: 5821 + scopes: ["buffers"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 131 + deletions_count: 13 + }, { + sha: "6f147f3e5bc80ef027b921ac7f6a4af585e86220" + date: "2021-01-18 18:49:52 UTC" + description: "Filtering in the API" + pr_number: 6028 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + 
files_count: 10 + insertions_count: 1670 + deletions_count: 468 + }, { + sha: "f5f7228339fe3362d1cb841324deb87eeef78eb0" + date: "2021-01-19 07:04:24 UTC" + description: "Support for collecting logs from static pods" + pr_number: 6056 + scopes: ["kubernetes_logs source"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 5 + insertions_count: 229 + deletions_count: 5 + }, { + sha: "a2107bb182d471b5a351600d4ff8079de066564b" + date: "2021-01-19 09:20:07 UTC" + description: "diagnostic error messages" + pr_number: 6023 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 44 + insertions_count: 2306 + deletions_count: 1170 + }, { + sha: "bf3401cf188aea7b4faff77b5a9050e5666461ab" + date: "2021-01-19 02:59:58 UTC" + description: "Update VRL CLI behavior" + pr_number: 6035 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 2 + insertions_count: 30 + deletions_count: 18 + }, { + sha: "a33537613404080d617aeafd117bd8ae53daad53" + date: "2021-01-19 03:00:24 UTC" + description: "Add length function for VRL" + pr_number: 6088 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 11 + insertions_count: 279 + deletions_count: 6 + }, { + sha: "0e248d4103ba169cf15dafadd3daf45c0d2980d2" + date: "2021-01-19 10:02:51 UTC" + description: "Fix Metrics not Updating" + pr_number: 5917 + scopes: ["prometheus_exporter sink"] + type: "fix" + breaking_change: false + author: "Raphael Taylor-Davies" + files_count: 1 + insertions_count: 66 + deletions_count: 0 + }, { + sha: "7e5f6baa19cc616eab8b69e5be43c4c70990e1ff" + date: "2021-01-19 19:26:38 UTC" + description: "improve test harness set-up" + pr_number: 6105 + scopes: ["remap"] + type: "feat" + breaking_change: false + author: "Jean Mertz" + files_count: 31 + insertions_count: 273 + deletions_count: 13 + }, { + sha: "f33a2a4c03d1d8bfd3f1c22403964a1afb636377" + date: "2021-01-19 23:39:54 UTC" + description: "API sorting, starting with `components`" + pr_number: 6115 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 5 + insertions_count: 419 + deletions_count: 35 + }, { + sha: "d119fe9ee7889848f95375fe9a11fad8da8b9153" + date: "2021-01-20 00:30:43 UTC" + description: "`ipv6_to_ipv4` function allows ipv4 addresses to pass through" + pr_number: 6052 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 2 + insertions_count: 31 + deletions_count: 44 + }, { + sha: "c7a6fef2841767df73a0be706497ac8d5e2717ad" + date: "2021-01-20 15:06:28 UTC" + description: "Sorting for sources/transforms/sinks" + pr_number: 6136 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 6 + insertions_count: 710 + deletions_count: 193 + }, { + sha: "2f97437915c659be299aca8a39833bef738a777e" + date: "2021-01-21 00:25:46 UTC" + description: "Update VRL docs URLs" + pr_number: 6120 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Luc Perkins" + files_count: 1 + insertions_count: 3 + deletions_count: 2 + }, { + sha: "9c3baf76bf705dfd5e2ec53d88376aafc6976983" + date: "2021-01-21 03:38:05 UTC" + description: "Add encoding charset and line delimiter configuration" + pr_number: 5436 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Anup Dhamala" + files_count: 20 + insertions_count: 728 + deletions_count: 26 + }, { 
+ sha: "5efa5d95f48640295728ac4c0672cfe6545b43ed" + date: "2021-01-22 03:56:37 UTC" + description: "support regexp in filters" + pr_number: 6118 + scopes: ["postgresql_metrics source"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 3 + insertions_count: 204 + deletions_count: 84 + }, { + sha: "98a066a1b659e43843c75760fc16cbbac6214380" + date: "2021-01-22 18:34:36 UTC" + description: "fix and document exclude_self logic" + pr_number: 5998 + scopes: ["docker_logs source"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 2 + insertions_count: 39 + deletions_count: 61 + }, { + sha: "e8ac7d50c89fd85d81363e866f8c613252b43336" + date: "2021-01-23 05:58:13 UTC" + description: "Add `auth.access_key_id` and `auth.secret_access_key` options" + pr_number: 6188 + scopes: ["auth", "aws platform"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 64 + deletions_count: 18 + }, { + sha: "35430409c771f63e45716035d0ae9fb12393943e" + date: "2021-01-23 05:11:52 UTC" + description: "Rename healthcheck error" + pr_number: 6192 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Andrew Haines" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "cfeb2c2e358c6bc47c3c70f7a8ded2970f958a2c" + date: "2021-01-27 01:30:46 UTC" + description: "add `get_hostname` function" + pr_number: 6141 + scopes: ["remap transform"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 7 + insertions_count: 111 + deletions_count: 0 + }, { + sha: "87a21478b5e4256239894072095a57f61c1af725" + date: "2021-01-27 02:32:55 UTC" + description: "Merge configs for `vector test`" + pr_number: 6214 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 58 + deletions_count: 80 + }, { + sha: "e777720e1e8ba17514500df6d4e010d9fb10012d" + date: "2021-01-28 03:24:51 UTC" + description: "add wildcard expansion to inputs" + pr_number: 6170 + scopes: ["config"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 5 + insertions_count: 188 + deletions_count: 35 + }, { + sha: "dad5a052f0669e9ea39172b197cc4a2775d506bd" + date: "2021-01-29 04:43:53 UTC" + description: "call ack on any result" + pr_number: 6267 + scopes: ["nats sink"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "7b762a072901f1be4a26dc37430d3ae1183b3ca3" + date: "2021-01-28 19:44:39 UTC" + description: "Extract new second-level features from Cargo.toml" + pr_number: 6263 + scopes: ["ci"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 11 + deletions_count: 3 + }, { + sha: "ae47b00fa3a0691d950c83976df48f5b83335662" + date: "2021-01-29 05:02:03 UTC" + description: "fix field sublocation_type in parse_aws_vpc_flow_log" + pr_number: 6229 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "e5ed779d4321793a55594b40235a7252d9c90200" + date: "2021-01-29 06:20:33 UTC" + description: "add the ability to configure Vector API at the Helm charts" + pr_number: 6248 + scopes: ["kubernetes platform"] + type: "enhancement" + breaking_change: false + author: "Oleg Tsymbal" + files_count: 10 + insertions_count: 65 + deletions_count: 0 + 
}, { + sha: "aa665d3bef94575a476fc533debc3049471eb6ea" + date: "2021-01-29 05:11:03 UTC" + description: "test again released version of loki" + pr_number: 6279 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "5382390239a4dfff5cc5ec2cc738d5309087d749" + date: "2021-01-29 08:17:13 UTC" + description: "Add more benchmark comparisons" + pr_number: 6233 + scopes: ["performance"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 9 + insertions_count: 752 + deletions_count: 3 + }, { + sha: "92a81794668fb89dbb576fdfcd8408138c2b41c6" + date: "2021-01-29 19:36:51 UTC" + description: "`parse_grok` should error when it fails to parse" + pr_number: 6271 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "FungusHumungus" + files_count: 1 + insertions_count: 7 + deletions_count: 6 + }, { + sha: "06749528c7ae94601738284dc818dfb31ef0de24" + date: "2021-01-29 20:05:33 UTC" + description: "split start_at_beginning into simpler options" + pr_number: 6178 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 6 + insertions_count: 209 + deletions_count: 65 + }, { + sha: "36b36a53bc6504c9176dac6fa445ab9347929659" + date: "2021-01-29 22:36:28 UTC" + description: "disallow non-unique component names" + pr_number: 6270 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 69 + deletions_count: 0 + }, { + sha: "3d37188984d1c519c134bc89fb1a617be9e76df7" + date: "2021-01-30 06:55:59 UTC" + description: "Make size of socket send/receive buffers configurable" + pr_number: 6177 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Pablo Sichert" + files_count: 32 + insertions_count: 545 + deletions_count: 164 + }, { + sha: "a2a54f561a9f7157bd59f12a7b72b564e346887d" + date: "2021-01-30 00:33:18 UTC" + description: "Fix metrics::tests::test_cardinality_metric" + pr_number: 6278 + scopes: ["metrics"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 23 + deletions_count: 10 + }, { + sha: "51f5827df6c6d3337db6099016af03cefbc7f9dc" + date: "2021-01-30 05:01:41 UTC" + description: "use poll backend for watcher" + pr_number: 6286 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 4 + deletions_count: 6 + }, { + sha: "055524028977e2ea5aa2fadbee093125e4d07ec1" + date: "2021-02-02 02:44:41 UTC" + description: "maintain type definitions for maps" + pr_number: 6182 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 27 + insertions_count: 748 + deletions_count: 179 + }, { + sha: "316937ce8067cc2785510d077e4172fb4f63ac58" + date: "2021-02-02 16:10:34 UTC" + description: "Fix out-of-order error" + pr_number: 5973 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Duy Do" + files_count: 4 + insertions_count: 361 + deletions_count: 38 + }, { + sha: "080a10cf5c2d3127ca3ff232b80aa6cf06d1573e" + date: "2021-02-02 05:40:10 UTC" + description: "revert poll watcher change" + pr_number: 6311 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "1e9d21123352b59ab590ba85aaabfe9eca9bcbb8" + date: "2021-02-03 02:03:20 UTC" + 
description: "Rework the metrics batch buffer" + pr_number: 6251 + scopes: ["metrics"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 11 + insertions_count: 877 + deletions_count: 456 + }, { + sha: "594a6c43e4e0f2d4e01136618f85a3a782eeec14" + date: "2021-02-03 08:20:06 UTC" + description: "Add charset option to decode_base64 function" + pr_number: 6296 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 7 + insertions_count: 219 + deletions_count: 138 + }, { + sha: "7f3443202d9b793568977280ee2951467165fc19" + date: "2021-02-03 21:52:33 UTC" + description: "#5642 - build deb packages that install on Deb 8" + pr_number: 6332 + scopes: ["deployment"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "9c55395cd85423fb63893596617b29130abfadd9" + date: "2021-02-03 21:52:46 UTC" + description: "Re-flatten the remap-functions directory tree" + pr_number: 6335 + scopes: ["remap"] + type: "fix" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 53 + deletions_count: 56 + }, { + sha: "cf6f56ac088ae525b1e368de64eaf9655487fbd3" + date: "2021-02-04 02:50:17 UTC" + description: "Fix handling of component errors" + pr_number: 6309 + scopes: ["topology"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 89 + deletions_count: 88 + }, { + sha: "344053fec4afc74ad8eed4833ade2c14a924a06e" + date: "2021-02-05 01:28:33 UTC" + description: "Add `uptime` & `boot_time` metrics" + pr_number: 6292 + scopes: ["host_metrics source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 59 + deletions_count: 1 + }, { + sha: "f53e7ec9d07e3ea6d555b82e85c038887113c1c2" + date: "2021-02-04 18:11:35 UTC" + description: "Add HTTP auth support" + pr_number: 6281 + scopes: ["prometheus_remote_write sink"] + type: "feat" + breaking_change: false + author: "William Perron" + files_count: 2 + insertions_count: 47 + deletions_count: 2 + }, { + sha: "e758240ecc8a83d8bdf568a8ce2f1759b944077f" + date: "2021-02-05 05:36:45 UTC" + description: "Correct the real world benches" + pr_number: 6350 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 7 + deletions_count: 2 + }, { + sha: "f2529f091786df59ab9d7155c69483f93fec3041" + date: "2021-02-05 01:48:23 UTC" + description: "Deprecate fingerprint.bytes option" + pr_number: 6338 + scopes: ["file source"] + type: "fix" + breaking_change: false + author: "Jesse Szwedko" + files_count: 2 + insertions_count: 19 + deletions_count: 20 + }, { + sha: "fe17fe46ae60c5983bd9f063bb412d7170e1eb15" + date: "2021-02-06 05:08:15 UTC" + description: "Add `parse_common_log` function" + pr_number: 6230 + scopes: ["remap transform"] + type: "feat" + breaking_change: false + author: "Pablo Sichert" + files_count: 5 + insertions_count: 389 + deletions_count: 0 + }, { + sha: "103a0a7c07b351565f6ac136123a91bb1e373dda" + date: "2021-02-05 22:46:33 UTC" + description: "Add join function" + pr_number: 6313 + scopes: ["remap"] + type: "enhancement" + breaking_change: false + author: "Luc Perkins" + files_count: 5 + insertions_count: 282 + deletions_count: 0 + }, { + sha: "bec2c634c935341468b5da73bd43403697eddac4" + date: "2021-02-06 06:16:47 UTC" + description: "Add `parse_glog` function" + pr_number: 6293 + scopes: ["remap transform"] + 
type: "feat" + breaking_change: false + author: "Pablo Sichert" + files_count: 6 + insertions_count: 275 + deletions_count: 0 + }, { + sha: "e56874a27d8186820bc0aa664c351b8c532d04f5" + date: "2021-02-06 02:49:34 UTC" + description: "Update bollard to 0.9.1 to fix #5937" + pr_number: 6369 + scopes: ["docker", "dependencies"] + type: "fix" + breaking_change: false + author: "James Turnbull" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "c6c87329cb2de7baab8a9e7792dcb4a7668d2d9e" + date: "2021-02-08 01:10:15 UTC" + description: "Emit `processed_events_total` after transform has processed event" + pr_number: 6294 + scopes: ["observability"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 69 + deletions_count: 14 + }, { + sha: "85c0a29593418d7555357ae96e5980b8653ccb4b" + date: "2021-02-08 10:36:46 UTC" + description: "Switch to `futures-0.3` channels" + pr_number: 6283 + scopes: [] + type: "perf" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 36 + deletions_count: 11 + }, { + sha: "9c0bdd3adca4fe9243cca75053c66d10ed2fe9eb" + date: "2021-02-09 04:29:18 UTC" + description: "remove connect in SourceConfig::build" + pr_number: 6246 + scopes: ["mongodb_metrics source", "postgresql_metrics source"] + type: "fix" + breaking_change: false + author: "Kirill Fomichev" + files_count: 2 + insertions_count: 111 + deletions_count: 109 + }, { + sha: "2dc29ea32748b6f6839fa2941077c1ee4330ee0e" + date: "2021-02-09 06:51:17 UTC" + description: "`FileSourceMetricFile` sort" + pr_number: 6154 + scopes: ["graphql api"] + type: "enhancement" + breaking_change: false + author: "Lee Benson" + files_count: 6 + insertions_count: 339 + deletions_count: 32 + }, { + sha: "2000686a0d3b8671026ec87e762e9e6964758e21" + date: "2021-02-09 00:56:00 UTC" + description: "Move normalization out of MetricsBuffer" + pr_number: 6340 + scopes: ["metrics", "sinks"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 171 + deletions_count: 87 + }, { + sha: "c82bb6a5e74e89a59dcf853384e2acba970fc6d3" + date: "2021-02-09 08:00:35 UTC" + description: "Fix parse_json Wasm function" + pr_number: 6398 + scopes: ["wasm transform"] + type: "fix" + breaking_change: false + author: "Luc Perkins" + files_count: 1 + insertions_count: 13 + deletions_count: 5 + }, { + sha: "48af0ac6757f88241c708ad2063dffb637b5146e" + date: "2021-02-09 22:12:38 UTC" + description: "Log IO errors when globbing" + pr_number: 6384 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 78 + deletions_count: 19 + }, { + sha: "a6d8ef583ebe7009af91418caf1ae584cfb75db1" + date: "2021-02-09 20:51:09 UTC" + description: "Add support for metrics in templates" + pr_number: 6351 + scopes: ["templating"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 97 + deletions_count: 18 + }, { + sha: "f5574b5b53f9ac39f1008ffe29d69eafcaec5c9f" + date: "2021-02-10 07:05:30 UTC" + description: "change release profile" + pr_number: 6202 + scopes: ["releasing"] + type: "enhancement" + breaking_change: false + author: "Kirill Fomichev" + files_count: 4 + insertions_count: 14 + deletions_count: 28 + }, { + sha: "879c5f55a1cc1c96bcb00234118c33a616acc543" + date: "2021-02-09 22:31:35 UTC" + description: "Fix merge error with test cases" + pr_number: 6405 + scopes: ["templating"] + 
type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "ce108f3c03a9da7c8e07ecca735843c8ca6bfa6b" + date: "2021-02-10 23:45:06 UTC" + description: "Expose node-port for vector-aggregator" + pr_number: 6020 + scopes: ["kubernetes platform"] + type: "feat" + breaking_change: false + author: "Thor Anker Kvisgård Lange" + files_count: 9 + insertions_count: 95 + deletions_count: 53 + }, { + sha: "066c22e6b1eab5bbc4e9dfe4ee6a24f3e491c421" + date: "2021-02-10 22:24:26 UTC" + description: "Handle OK response from api_watcher with embedded desync elegantly" + pr_number: 6053 + scopes: ["kubernetes_logs source"] + type: "fix" + breaking_change: false + author: "Ian Henry" + files_count: 13 + insertions_count: 1365 + deletions_count: 187 + }, { + sha: "4b6cf6556606af62e17bbe5414fa380e60580ce3" + date: "2021-02-11 01:13:38 UTC" + description: "Fix lua_field_filter/v2 implementation" + pr_number: 6355 + scopes: ["lua transform"] + type: "perf" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 4 + deletions_count: 5 + }] + } + "0.8.0": { + date: "2020-02-25" + codename: "A Wholesome Platter Of Features" + whats_next: [] + commits: [{ + sha: "c5ad7d22f8f3b02c5fd8bc784c7c5cf54dd7b694" + date: "2020-01-22 17:53:08 +0000" + description: "Make sorting of blog posts stable" + pr_number: 1566 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "3469bfcfeb39997c977c300399b8f2562f4c7730" + date: "2020-01-22 18:00:10 +0000" + description: "Add AWS API key for Windows tests in CI" + pr_number: 1565 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "b9584fae31e6809be62b912b4162454fad688485" + date: "2020-01-22 18:00:26 +0000" + description: "Pass `CIRCLE_SHA1` environment variable to `release-github` job" + pr_number: 1567 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "641cb48a13d46556448a18e5d4e6400a33a1e78b" + date: "2020-01-22 11:36:26 +0000" + description: "Fix a bug with `retry_limit` is set to 1" + pr_number: 1569 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 31 + deletions_count: 1 + }, { + sha: "203efcd8071edc4e6986968d3c8ff9b2e36f341d" + date: "2020-01-22 16:11:44 +0000" + description: "Enable more logging by default" + pr_number: 1564 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 8 + deletions_count: 7 + }, { + sha: "e3afda368582dd51f3518eaf2af3d5d82873b52a" + date: "2020-01-22 14:38:52 +0000" + description: "Allow building on Windows Stable Rust" + pr_number: 1560 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 9 + insertions_count: 180 + deletions_count: 96 + }, { + sha: "708d6228b09c2a3e84da635f21cef068555844d9" + date: "2020-01-22 19:30:47 +0000" + description: "Allow for non-IP address host names" + pr_number: 1575 + scopes: ["socket sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 9 + insertions_count: 117 + deletions_count: 118 + }, { + sha: 
"1c78da516d5d9769753c1a7194d0cb45271afcc4" + date: "2020-01-23 13:28:37 +0000" + description: "Use `describe_delivery_stream` AWS API method for healthcheck" + pr_number: 1573 + scopes: ["aws_kinesis_firehose sink"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 13 + deletions_count: 22 + }, { + sha: "a1cac868c9f4303cdb3258ded509a3889fc30f1a" + date: "2020-01-23 10:47:08 +0000" + description: "Bump version to 0.8.0 to fix `make check-version`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c960f953f93c2b717337033c8589fd8512278a08" + date: "2020-01-23 11:16:54 +0000" + description: "Update `Cargo.lock`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "f996505eaaff329d1e7360ceca97fac2e8b12aa3" + date: "2020-01-23 14:51:32 +0000" + description: "Escape special characters in options descriptions" + pr_number: 1580 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 26 + insertions_count: 53 + deletions_count: 53 + }, { + sha: "9aac838ff2bd09aacbb58cec79bebda54c0ba4ca" + date: "2020-01-23 19:18:46 +0000" + description: "Check that all blog articles are signed in CI" + pr_number: 1459 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 27 + insertions_count: 130 + deletions_count: 38 + }, { + sha: "c084001fc871db231bcb999f3946b7ad6b2319df" + date: "2020-01-23 11:36:35 +0000" + description: "Fix broken s3 link" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 9 + insertions_count: 454 + deletions_count: 20 + }, { + sha: "26539079cb5a5b3c2e530191297f70a05f3db024" + date: "2020-01-23 11:39:15 +0000" + description: "Fix socket sink docs" + pr_number: 1585 + scopes: ["socket sink"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "4e6fc6b240c23651e3b7323f9a0f3de6fdb7468b" + date: "2020-01-23 19:44:22 +0000" + description: "Create `vector` user when installing RPM package" + pr_number: 1583 + scopes: ["rpm platform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 11 + deletions_count: 2 + }, { + sha: "8eb88242da06eb14795957b0fb9d46b6ec8d7681" + date: "2020-01-23 11:56:59 +0000" + description: "Fix avatar on blog list page" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "6179310868047ceff60e46172e452c54b55e8aa1" + date: "2020-01-23 16:58:32 +0000" + description: "Redo blog sigs" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 0 + deletions_count: 0 + }, { + sha: "c057c0f70876ac2404ae1d59ced6ca3c3e29b96d" + date: "2020-01-23 21:44:23 +0000" + description: "Support bug fixing releases" + pr_number: 1587 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 18 + deletions_count: 5 + }, { + sha: "3adfc70416aa3cc078257a3192b148cd530dc770" + date: "2020-01-23 22:26:44 
+0000" + description: "Add all generated files to the release commit" + pr_number: 1588 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "895fb9b2022df7708eec6fbd28e5bede710f3321" + date: "2020-01-23 23:31:55 +0000" + description: "Do not require `systemd` as an RPM dependency" + pr_number: 1590 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 2 + }, { + sha: "eab9ea663dd5bc52a4db4c99aa1518454d026a29" + date: "2020-01-23 23:37:55 +0000" + description: "Add `release-push` target to the Makefile" + pr_number: 1589 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 30 + deletions_count: 0 + }, { + sha: "372287922d98e4ebe48c2518eeaa35cc9ca77d86" + date: "2020-01-24 09:40:28 +0000" + description: "Fix typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "8d2126dae5af5d6b8ea426e74758e3eba27f8a79" + date: "2020-01-24 05:10:35 +0000" + description: "Wrap failing test in feature flag" + pr_number: 1595 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 2 + deletions_count: 3 + }, { + sha: "135d2504ccf3a7efdc6e664cb306ef830c31f7fd" + date: "2020-01-24 13:17:59 +0000" + description: "Always check containers for self" + pr_number: 1525 + scopes: ["docker source"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 53 + deletions_count: 59 + }, { + sha: "f1e1a06ffdcf9bd24df232a928750f23ce16ec39" + date: "2020-01-24 15:55:58 +0000" + description: "Fix `release-github` CI job" + pr_number: 1600 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 4 + deletions_count: 2 + }, { + sha: "f2a0df640cbbef34c4df17f1fc7bdc04b6d14ce0" + date: "2020-01-24 18:15:48 +0000" + description: "Run `make generate`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 56 + }, { + sha: "cc59c392b253daf83cb8d2a231a749fa1df0d2b7" + date: "2020-01-24 21:32:15 +0000" + description: "Fix `release-homebrew` CI job" + pr_number: 1601 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 5 + deletions_count: 6 + }, { + sha: "7fce71cf21dc5af5fa4db2de480589263468c03f" + date: "2020-01-25 13:21:26 +0000" + description: "Add Netlify _redirects file" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 4 + deletions_count: 1 + }, { + sha: "7b4721053c9284c8ef2ebc0335d565620ff0c098" + date: "2020-01-25 13:35:45 +0000" + description: "Force netlify redirects" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 14 + deletions_count: 11 + }, { + sha: "d06926ce8b74263d92e2f019a652c4e54ca78626" + date: "2020-01-25 15:21:53 +0000" + description: "Fix socket address documentation" + pr_number: 1607 + scopes: ["socket source"] + type: "docs" + breaking_change: false + 
author: "Binary Logic" + files_count: 31 + insertions_count: 76 + deletions_count: 65 + }, { + sha: "59cd776e661b72e7728401333ffe611cfcf0b27e" + date: "2020-01-25 15:25:27 +0000" + description: "Remove unused guides attribute" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 27 + insertions_count: 0 + deletions_count: 27 + }, { + sha: "7fa4a9ef2c322e0ac0ce6a66ca328c0161241acf" + date: "2020-01-25 15:40:05 +0000" + description: "Fix service provider filters in components section" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 41 + insertions_count: 995 + deletions_count: 200 + }, { + sha: "0521ac92685ef18b093a076041767493484189c8" + date: "2020-01-25 15:58:22 +0000" + description: "Cleanup authentication options" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 24 + insertions_count: 315 + deletions_count: 350 + }, { + sha: "8be035e86eaf2e1fc6b40313b94b21e9232ae84a" + date: "2020-01-25 16:03:45 +0000" + description: "Make region for AWS services a required option" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 9 + insertions_count: 212 + deletions_count: 204 + }, { + sha: "c228164f54fcba7ff211a506776e17b8d7ca19a6" + date: "2020-01-25 16:19:23 +0000" + description: "The region option is only requried for AWS exclusive sinks" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 28 + deletions_count: 24 + }, { + sha: "26ab28bc49cb9e6dbbe1c8f245892f44b5ddb1de" + date: "2020-01-25 16:39:05 +0000" + description: "Nest source building steps under platform tabs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 274 + deletions_count: 236 + }, { + sha: "d6c85157af4cf189b8d0b29b26cf0468e32ce36c" + date: "2020-01-25 16:51:51 +0000" + description: "Add all vector.dev redirects to netlify.toml" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 46 + deletions_count: 0 + }, { + sha: "ccaf8e450fb1d83e2e249ee5ce8870e60816ba57" + date: "2020-01-25 17:21:32 +0000" + description: "Fix journald supported operating systems" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 4 + deletions_count: 5 + }, { + sha: "36b68149b36fe9768b5757c6eda43bea588a567e" + date: "2020-01-25 18:01:02 +0000" + description: "Add \"requirements\" section for relevant components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 56 + insertions_count: 550 + deletions_count: 1 + }, { + sha: "c4ef618e6b5c9437f24b48934c7563afe74827f2" + date: "2020-01-25 18:03:35 +0000" + description: "Add address example to logplex source" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 7 + deletions_count: 5 + }, { + sha: "c8999e5465a567ad27d51fd4a78a3ba31e3460a7" + date: "2020-01-26 13:27:41 +0000" + description: "Fix format errors" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 3 + deletions_count: 3 + }, { + sha: 
"623c75377522ae5ad02095d3426fb15f9d4621f6" + date: "2020-01-26 15:40:19 +0000" + description: "Setup Netlify proxy for test-results subdomain" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 9 + deletions_count: 3 + }, { + sha: "ffac8b6cedee84217c779b5947910f0cabae1c8b" + date: "2020-01-26 15:47:59 +0000" + description: "Setup Netlify proxy for sh subdomain" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "61032b6b175e3e19b531c3e014c823f656f24aa0" + date: "2020-01-27 12:42:08 +0000" + description: "Add checkpointing and communication strategy to journald source docs" + pr_number: 1611 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 48 + deletions_count: 8 + }, { + sha: "3780652b48caf7550252a662956e669d638590d5" + date: "2020-01-27 21:00:26 +0000" + description: "Document adding the Vector user to a group which can use `journalctl`" + pr_number: 1584 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 33 + deletions_count: 7 + }, { + sha: "c85c759b2d28315dc05b117f3a610cf8f632b614" + date: "2020-01-27 17:41:37 +0000" + description: "Fix the journalctl start date to work across all timezones" + pr_number: 1627 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "219b1a8447fe03f56dacc314c7d5366fb87ca3ad" + date: "2020-01-28 15:30:54 +0000" + description: "Initial `gcp_stackdriver_logging` sink implementation" + pr_number: 1555 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 23 + insertions_count: 2095 + deletions_count: 120 + }, { + sha: "eb6b0a137944e356c31c14d8bf2422bc99b09256" + date: "2020-01-29 11:08:27 +0000" + description: "Disable kuberenetes tests in CI temporarily" + pr_number: 1629 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 35 + deletions_count: 39 + }, { + sha: "3149dcc9fef288db77ae765ec4166a959c5d5830" + date: "2020-01-29 11:16:50 +0000" + description: "Event::extend use FromIter" + pr_number: 1631 + scopes: ["event"] + type: "chore" + breaking_change: false + author: "Evan Cameron" + files_count: 2 + insertions_count: 18 + deletions_count: 22 + }, { + sha: "bb3aaa5d7d67adbacb4f77a684a9e66088a6d842" + date: "2020-01-29 12:03:49 +0000" + description: "Introduce `JsonArrayBuffer` buffer" + pr_number: 1633 + scopes: ["buffers"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 89 + deletions_count: 1 + }, { + sha: "5c1e38c61e8b66961d8b38685dc990ef468829d9" + date: "2020-01-29 17:31:53 +0000" + description: "Cleanup generate and check Dockerfiles" + pr_number: 1623 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 15 + deletions_count: 31 + }, { + sha: "51b5a2f43942d3a270475c2d24cb52ce4f9db59f" + date: "2020-01-29 19:52:01 +0000" + description: "Add new `drop_unspecified` option to drop unspecified fields" + pr_number: 1636 + scopes: ["coercer transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + 
insertions_count: 165 + deletions_count: 25 + }, { + sha: "b51b1735c4cf348f5e33e2764d1e5b099561ab67" + date: "2020-01-29 21:02:44 +0000" + description: "upgrade http and prost-build deps" + pr_number: 1638 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 27 + deletions_count: 94 + }, { + sha: "cf06460aba1f02cc74d65e12af635515530e3dad" + date: "2020-01-30 09:50:21 +0000" + description: "add cargo-deny check for advisories" + pr_number: 1640 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 20 + deletions_count: 0 + }, { + sha: "f6626605e8b97af54bebe8bca6a373163a73a631" + date: "2020-01-30 12:59:24 +0000" + description: "Upgrade to rustc `1.41.0`" + pr_number: 1646 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "37edb080ec7a5d20e141e5813d0475c59927d366" + date: "2020-01-30 13:56:50 +0000" + description: "Add run.sh to run commands through Docker based on the `USE_DOCKER` env var" + pr_number: 1637 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 68 + deletions_count: 25 + }, { + sha: "c85022190fe0ca137e958aa58366b56a39d2a359" + date: "2020-01-30 16:59:35 +0000" + description: "Initial `sematext` sink implementation" + pr_number: 1562 + scopes: ["new sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 14 + insertions_count: 1079 + deletions_count: 3 + }, { + sha: "a9b7950a0f198bc0c87289d048a39b97cc915f4c" + date: "2020-01-30 17:25:26 +0000" + description: "Upgrade all dependencies" + pr_number: 1648 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 810 + deletions_count: 655 + }, { + sha: "525225524bb1e21121563f386a046bc68da5ac86" + date: "2020-01-30 17:43:19 +0000" + description: "Add Ana 🎉" + pr_number: 1649 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 12 + deletions_count: 0 + }, { + sha: "4f429e00b63a5cddfdd725cd3868f04d094f9e2f" + date: "2020-01-31 18:30:48 +0000" + description: "Fix group creation in the RPM package" + pr_number: 1654 + scopes: ["rpm platform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0a8b0c8043081afb2e01b36f7ebdd3c72c521961" + date: "2020-01-31 11:29:02 +0000" + description: "Update to new lockfile format" + pr_number: 1655 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1921 + deletions_count: 1931 + }, { + sha: "c1bf8b9a0a0c5f3e3b257e3b8749c114909363f6" + date: "2020-01-31 12:38:14 +0000" + description: "Fix EU endpoint and docs" + pr_number: 1657 + scopes: ["sematext sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "47a981f1677a1325c86cf1edbbaf8031126fc833" + date: "2020-01-31 12:38:38 +0000" + description: "Refactor internal http sink to use new helper API" + pr_number: 1641 + scopes: ["http sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 8 + insertions_count: 327 + deletions_count: 132 + }, { + sha: "b894cf441ceb22ea0f2f8f27b02a05c608b1910c" + date: 
"2020-01-31 21:27:55 +0000" + description: "Add ability to verify RPM artifacts locally" + pr_number: 1647 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 7 + insertions_count: 170 + deletions_count: 9 + }, { + sha: "ecf14f9a5b48694f45fad8569a47d34e3c4431b8" + date: "2020-01-31 13:55:36 +0000" + description: "Update Twitter handle to vectordotdev" + pr_number: 1664 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 9 + deletions_count: 9 + }, { + sha: "e24087a4273e80f4dbbff1e29ee65b0a2b85cdf5" + date: "2020-01-31 22:16:51 +0000" + description: "Fix creation of the tag in `make release`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1d21d4e27b940a32c505f673ccfefe6434ec3890" + date: "2020-01-31 22:24:27 +0000" + description: "Use all tags in `make release`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "a1dcc9679eba564f35a92d850080d0970a05fab6" + date: "2020-01-27 13:13:47 +0000" + description: "Improve 0.7.1 release notes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 73 + deletions_count: 73 + }, { + sha: "00653bfc5bdee996811277cc3246665cbccb7710" + date: "2020-02-01 00:38:11 +0000" + description: "Update README header links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 6 + insertions_count: 11 + deletions_count: 11 + }, { + sha: "1cc654284addd2e0774d486e27e6d742158029b6" + date: "2020-02-01 13:36:12 +0000" + description: "Fix nix verifier step (and nightly builds)" + pr_number: 1669 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 16 + deletions_count: 37 + }, { + sha: "ae6f1a975f9e71d6331790f4f78334d37e99f940" + date: "2020-02-01 17:27:23 +0000" + description: "flesh out security policy" + pr_number: 1642 + scopes: ["security"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 8 + insertions_count: 443 + deletions_count: 89 + }, { + sha: "cb3f8c1d4eddff297747df65d312810bb672cf99" + date: "2020-02-01 18:28:40 +0000" + description: "Fox SECURITY.md vulnerability link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "06670c9c42b79193962dd0995017177d5bac68d5" + date: "2020-02-01 18:29:30 +0000" + description: "Fix SECURITY.md TOC links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 35 + deletions_count: 35 + }, { + sha: "ed4b46a58712b589175abceb1b41d80294c431ea" + date: "2020-02-02 08:28:05 +0000" + description: "Colourize help" + pr_number: 1660 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 10 + deletions_count: 6 + }, { + sha: "df4c4bba3f24815bcf2ca5bf977e8498ddaf1916" + date: "2020-02-02 11:46:54 +0000" + description: "Add team member social profiles" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + 
files_count: 1 + insertions_count: 11 + deletions_count: 0 + }, { + sha: "49dd7519e68d6034b96ac62dea31e5b3804e2e0a" + date: "2020-02-02 11:50:01 +0000" + description: "Fix COPY path for armv7 Docker image" + pr_number: 1671 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "f220a281ae5f451f55c589cd736b3403848d08c2" + date: "2020-02-02 13:46:33 +0000" + description: "Add assume_role docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 42 + deletions_count: 12 + }, { + sha: "d3ea7122ea63142e9c21026b59c2f0a4a72c167a" + date: "2020-02-02 14:24:49 +0000" + description: "Fix broken links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 54 + insertions_count: 144 + deletions_count: 148 + }, { + sha: "5b97e4c247340cc553b2359f9cdcd63ee02d4f28" + date: "2020-02-02 14:26:49 +0000" + description: "Clarify SECURITY.md requirements on 2fa and commit signing" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "eaad0fc503937a37b3ec935e4cb66499b8310c36" + date: "2020-02-02 21:48:59 +0000" + description: "fix typos" + pr_number: 1577 + scopes: [] + type: "docs" + breaking_change: false + author: "Anton Ryzhov" + files_count: 15 + insertions_count: 27 + deletions_count: 27 + }, { + sha: "14e2d5aca28ed7e0bdcbc3ccf005a4bd054332d5" + date: "2020-02-02 16:04:04 +0000" + description: "Add unix domain socket support to the `socket` sink" + pr_number: 1561 + scopes: ["socket sink"] + type: "feat" + breaking_change: false + author: "Spencer T Brody" + files_count: 9 + insertions_count: 379 + deletions_count: 50 + }, { + sha: "a04a584bbb61c936b10a3224c06b36b8e361448c" + date: "2020-02-02 16:40:59 +0000" + description: "Add .github/labels.yml for syncing labels" + pr_number: 1677 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 547 + deletions_count: 36 + }, { + sha: "ac874f883088f2c23642f7563a69be4b0397ff50" + date: "2020-02-02 16:44:49 +0000" + description: "Fix labels.yml path in label-syncer workflow" + pr_number: null + scopes: ["operation"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "7ddd195b31a331ce17cfb18d287463469b48f56c" + date: "2020-02-02 16:47:27 +0000" + description: "Cleanup unused labels" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 9 + deletions_count: 14 + }, { + sha: "1c912204572e5b5e32096247baf94870f3f25b25" + date: "2020-02-02 23:13:07 +0000" + description: "Define a JSON schema for the .meta/**/*.toml files" + pr_number: 1678 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 55 + insertions_count: 733 + deletions_count: 111 + }, { + sha: "fcdddbc0dbe062c44643f3be3ce6644e4ff40525" + date: "2020-02-03 12:14:14 +0000" + description: "Upgrade `tracing-subscriber 0.2.0-alpha.5`" + pr_number: 1681 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 29 + deletions_count: 8 + }, { + sha: "74290db698ab6290c3f39437cb34586289d0778f" + date: 
"2020-02-03 20:30:15 +0000" + description: "Fix the SECURITY.md link at CONTRIBUTING.md" + pr_number: 1682 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "ba7a11caaac2c3c1812e3de6f0cb831a73c573dc" + date: "2020-02-03 21:21:18 +0000" + description: "Fix `aws_s3` sink examples" + pr_number: 1683 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 6 + deletions_count: 3 + }, { + sha: "36fad64b72c177f5d1d4c477e9dc8dda4839917e" + date: "2020-02-03 17:53:56 +0000" + description: "Remove background checks on security page" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 4 + deletions_count: 9 + }, { + sha: "465af0b75988ffa0c613be5eeb30b4f549295d6f" + date: "2020-02-03 16:46:24 +0000" + description: "Fixup versions in tracing-metrics" + pr_number: 1690 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 4 + deletions_count: 13 + }, { + sha: "5a368f34983394c5159b398b7810ac1f62591094" + date: "2020-02-04 12:11:08 +0000" + description: "Add several S3 object-level options (`grant_full_control`, `server_side_encryption`, and more)" + pr_number: 1689 + scopes: ["aws_s3 sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 638 + deletions_count: 7 + }, { + sha: "6575e3836672d6111fa8b958e5cb67f46595002e" + date: "2020-02-04 23:46:41 +0000" + description: "Use `rust-toolchain` file when building CI Docker images" + pr_number: 1691 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 8 + insertions_count: 10 + deletions_count: 20 + }, { + sha: "c285303bc5f403629060d3498b8e2cdf20c6ad9a" + date: "2020-02-05 08:14:34 +0000" + description: "New `merge` transform" + pr_number: 1504 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "MOZGIII" + files_count: 23 + insertions_count: 1539 + deletions_count: 38 + }, { + sha: "c285303bc5f403629060d3498b8e2cdf20c6ad9a" + date: "2020-02-05 08:14:34 +0000" + description: "Add `auto_partial_merge` to automatically merge partial events" + pr_number: 1504 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 23 + insertions_count: 1539 + deletions_count: 38 + }, { + sha: "44099cdf794dc3cac6926002253f2120e3af0c01" + date: "2020-02-05 00:32:03 +0000" + description: "Fix auto_partial_merge option name" + pr_number: null + scopes: ["docker source"] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "0c22ca5c5e00313a12aef3dd1d2abba311ad890c" + date: "2020-02-05 10:32:25 +0000" + description: "Warn log for non retryable requests" + pr_number: 1706 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "99899e35d43fbd8b5301ecdce327a7764f6eeceb" + date: "2020-02-05 10:33:00 +0000" + description: "Rename cloud and add host" + pr_number: 1704 + scopes: ["sematext sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 126 + deletions_count: 52 + }, { + sha: "08d629cb79d55fb49c305f1a8866baa4a921c2fa" + date: 
"2020-02-05 11:00:20 +0000" + description: "Upgrade to `tracing-subscriber 0.2.0`" + pr_number: 1707 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "ca196b70101bd8d5a0868e27c58ae20b36f67709" + date: "2020-02-05 11:51:30 +0000" + description: "Fixup security language" + pr_number: 1711 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 9 + deletions_count: 12 + }, { + sha: "aa54e7fff0507560372cf313fb115f957adc0645" + date: "2020-02-05 14:42:12 +0000" + description: "Allow partials in /.meta/*.toml files" + pr_number: 1715 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 17 + insertions_count: 105 + deletions_count: 81 + }, { + sha: "0d5db5d71f210fca4667cacc19b3d35276a3897e" + date: "2020-02-05 15:05:42 +0000" + description: "Initial `logdna` sink implementation" + pr_number: 1668 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 16 + insertions_count: 1389 + deletions_count: 4 + }, { + sha: "7e502fb3ca35afa935758985d633de6fc4c7bbfd" + date: "2020-02-05 14:06:54 +0000" + description: "Drop aws endpoint and region options" + pr_number: 1703 + scopes: ["elasticsearch sink"] + type: "chore" + breaking_change: true + author: "Bruce Guenter" + files_count: 21 + insertions_count: 137 + deletions_count: 384 + }, { + sha: "bcd97a0ae482dadc392afe2ca2a8ab651ee1acac" + date: "2020-02-05 15:21:52 +0000" + description: "Add ENV_VAR api key examples first" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 10 + deletions_count: 8 + }, { + sha: "033de8bdbcd8913a410f354310f0f09474285110" + date: "2020-02-05 15:26:57 +0000" + description: "Fix LogDNA casing" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "2414924f69e4845f85e596f5bb9ef3db0b04c13a" + date: "2020-02-05 15:27:58 +0000" + description: "Add proper non retry messages" + pr_number: 1712 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 76 + deletions_count: 27 + }, { + sha: "fb88fdd211b25fd5c21be881e7a9c920626d614a" + date: "2020-02-05 20:41:25 +0000" + description: "Add a (super secret) vic page" + pr_number: 1710 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 12 + insertions_count: 738 + deletions_count: 0 + }, { + sha: "68dc4b099d7f4f1bac6f43ad27282d31aaaa7fd8" + date: "2020-02-05 16:10:37 +0000" + description: "make host calculation more explicit" + pr_number: 1720 + scopes: ["sematext sink"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 13 + deletions_count: 20 + }, { + sha: "7529fb88ace3d60b697e0c856e85d855b97a7070" + date: "2020-02-05 20:14:01 +0000" + description: "Add providers to labels and semantic PR scopes" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 8 + insertions_count: 153 + deletions_count: 43 + }, { + sha: "a70758f52937217662f7906e5140f92757623213" + date: "2020-02-06 20:32:18 +0000" + description: "Allow running all tests locally" + pr_number: 1701 + scopes: ["operations"] + type: "chore" + breaking_change: 
false + author: "Alexander Rodin" + files_count: 15 + insertions_count: 636 + deletions_count: 172 + }, { + sha: "8fb49eec4a8096635e83a88f909b1892359b020f" + date: "2020-02-06 17:37:30 +0000" + description: "Clarify topology warnings" + pr_number: 1726 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 3 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "d4c28115af4bbea56e5e7dc5ee137c682290ebe1" + date: "2020-02-06 20:56:43 +0000" + description: "Restore `rust-toolchain` file in the project root" + pr_number: 1729 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 9 + insertions_count: 79 + deletions_count: 77 + }, { + sha: "13c7e9a296769b83ea939e0d248b30df83c12615" + date: "2020-02-06 20:59:34 +0000" + description: "Fix `test-stable` CI job" + pr_number: 1730 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0d5287bd47524e00725d50b60fc0e7cc1f2c0432" + date: "2020-02-06 13:42:41 +0000" + description: "Replace Alexey with Luke" + pr_number: 1723 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "bfbe58cf1d25c905c27080a447f5e9a4fe356a98" + date: "2020-02-06 17:09:54 +0000" + description: "Set `beta` to true" + pr_number: null + scopes: ["merge transform"] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "5f8769d9c8ea52e4b39b4cf5f410bfbbf8c3755e" + date: "2020-02-06 17:15:21 +0000" + description: "Require `beta` attribute for all components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 34 + insertions_count: 45 + deletions_count: 15 + }, { + sha: "93b2c1510f2b8151722ab6dc9cc687dc796a4e2a" + date: "2020-02-07 10:27:17 +0000" + description: "Support multiple configuration files." 
+ pr_number: 1725 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 208 + deletions_count: 40 + }, { + sha: "9c3b12b700c9f408c03db63d014db6e27e2ff87b" + date: "2020-02-07 14:01:19 +0000" + description: "Improve `docker-run.sh` script" + pr_number: 1733 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 18 + deletions_count: 5 + }, { + sha: "52e2c85cd7d5f7c190edaa0c345c7a5208f5cc4c" + date: "2020-02-07 09:09:22 +0000" + description: "Add support for running containers under Podman" + pr_number: 1736 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 28 + deletions_count: 21 + }, { + sha: "6f290d3e55d78438100b1bd31747c6c4b1630184" + date: "2020-02-07 16:02:56 +0000" + description: "Improve cyclic deps error message" + pr_number: 1728 + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 98 + deletions_count: 77 + }, { + sha: "ce326530da9235233355a86dcc5c27b626f40a10" + date: "2020-02-07 14:51:44 +0000" + description: "Add support for `assume_role` to all AWS sinks" + pr_number: 1722 + scopes: ["aws provider"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 13 + insertions_count: 330 + deletions_count: 82 + }, { + sha: "a931f0ddca833eee9f1a5113e458f468fecaa221" + date: "2020-02-08 15:06:56 +0000" + description: "Disable failing CI check" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 6 + }, { + sha: "46e0df96231b2b04e3b29263f4e73ae1d79a09ca" + date: "2020-02-08 10:23:15 +0000" + description: "Fix loading of private keys in separate files for sinks" + pr_number: 1749 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 44 + deletions_count: 1 + }, { + sha: "d128ab8eb163f1aca92db3093f86fd1a447ab64c" + date: "2020-02-08 10:24:00 +0000" + description: "Use the \"minimal\" profile when installing with rustup" + pr_number: 1747 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 7 + deletions_count: 5 + }, { + sha: "350f5aa9be81ff271b46e86ced6ec820e4cf4024" + date: "2020-02-09 18:59:07 +0000" + description: "Move \"magic\" component fields up to the /.meta directory" + pr_number: 1758 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 181 + insertions_count: 2766 + deletions_count: 3018 + }, { + sha: "20fc32b0674bf40897011b3fe7ce3e4335b31358" + date: "2020-02-10 16:13:51 +0000" + description: "Fix `verify-nixos` CI check" + pr_number: 1760 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 8 + deletions_count: 2 + }, { + sha: "2ee1b8867e5c79057ed4467d9d4ce3984dd73599" + date: "2020-02-10 14:16:42 +0000" + description: "Fix nested field documentation" + pr_number: 1765 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 10 + insertions_count: 193 + deletions_count: 21 + }, { + sha: "0eb6c9804f4be108960fb48921fdbd87e150d946" + date: "2020-02-10 16:28:09 +0000" + description: "Initial `humio_logs` sink implementation" + 
pr_number: 1716 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 12 + insertions_count: 1022 + deletions_count: 1 + }, { + sha: "7a693547f4b1fcdc58fca3b0679410c53584cda7" + date: "2020-02-10 17:31:15 +0000" + description: "Fix encoding docs" + pr_number: 1768 + scopes: ["splunk_hec sink"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "fec6798e1a70b6ae8513286b568b6437bf4191d0" + date: "2020-02-10 17:36:25 +0000" + description: "Remove encoding option" + pr_number: 1767 + scopes: ["humio_logs sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 1 + deletions_count: 58 + }, { + sha: "0075ea61060bb57701c821eb835071f303d31d51" + date: "2020-02-10 23:37:31 +0000" + description: "Option to reload config on file change" + pr_number: 1330 + scopes: ["config"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 260 + deletions_count: 1 + }, { + sha: "d986d809c937d800fbca0c55f146284fac8154c4" + date: "2020-02-11 17:27:10 +0000" + description: "Fix typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "12ecd000ba7bd8642ae51fa391eb1bde3f9fba87" + date: "2020-02-11 18:43:16 +0000" + description: "Fix example `host` values for `splunk_hec` sink" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "1b24dd1833d41a3fdac741a1190033b6d49cb6f0" + date: "2020-02-11 09:48:50 +0000" + description: "Support conditionally rebuilding the docker images" + pr_number: 1744 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 16 + deletions_count: 10 + }, { + sha: "5fc373ea5d48eb40a7610673804f30475617e375" + date: "2020-02-11 13:02:20 +0000" + description: "Fix unit tests reference" + pr_number: 1777 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 646 + deletions_count: 32 + }, { + sha: "9a1422efc56fac61e39ff0f33fd73091bc1728de" + date: "2020-02-11 21:59:52 +0000" + description: "Improve best effort support for Syslog formats (5424, 3164, nginx style, etc)" + pr_number: 1757 + scopes: ["syslog source"] + type: "enhancement" + breaking_change: false + author: "FungusHumungus" + files_count: 8 + insertions_count: 200 + deletions_count: 143 + }, { + sha: "2535012cb81d0fa5a4ea0ede00a2f21e1a71d192" + date: "2020-02-12 10:32:13 +0000" + description: "Improve `vector generate` syntax and formatting" + pr_number: 1773 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 195 + deletions_count: 23 + }, { + sha: "8570e00229424e48227be654798ffb06ee6dc184" + date: "2020-02-12 11:05:34 +0000" + description: "Test sub command improvements" + pr_number: 1739 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 11 + insertions_count: 329 + deletions_count: 183 + }, { + sha: "955cc18652449cb63e69ceba839511dd4d262427" + date: "2020-02-12 12:21:42 +0000" + description: "Add units to default value examples" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: 
false + author: "binarylogic" + files_count: 1 + insertions_count: 11 + deletions_count: 5 + }, { + sha: "424a6a0f359db1e3cb398be8567f5be757b290bb" + date: "2020-02-12 13:00:41 +0000" + description: "Simplify docker example values" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 6 + insertions_count: 29 + deletions_count: 27 + }, { + sha: "93f43cab6331f5f9de41b55c036337beec7a2d9f" + date: "2020-02-12 20:13:45 +0000" + description: "Clarify generate error messages with type" + pr_number: 1780 + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 12 + deletions_count: 5 + }, { + sha: "15e69c8c2392f17104037cf94645231a874244d4" + date: "2020-02-13 18:51:08 +0000" + description: "Global default log schemas" + pr_number: 1769 + scopes: ["config"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 49 + insertions_count: 715 + deletions_count: 223 + }, { + sha: "dcc7a593cbaae72212af56b3e562e15d246556fc" + date: "2020-02-14 13:23:37 +0000" + description: "Add new global `log_schema` throughout the docs" + pr_number: 1795 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 24 + insertions_count: 445 + deletions_count: 132 + }, { + sha: "b4b3ebf18cdef172b037a717a1d6b723fcb7ed28" + date: "2020-02-14 13:43:56 +0000" + description: "Reduce homepage animation refresh rate" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9d2f3c06c1aabad9b726ae26e21c69c531bd4d6e" + date: "2020-02-14 22:00:31 +0000" + description: "Add behavior tests for `add_fields` and `remove_fields` transforms" + pr_number: 1781 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 6 + insertions_count: 144 + deletions_count: 1 + }, { + sha: "22f2e6d8822537b991e7ca7a6b81b7f0ee3d4571" + date: "2020-02-14 14:12:51 +0000" + description: "Stop homepage animation after a timeout" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 1 + insertions_count: 24 + deletions_count: 7 + }, { + sha: "61f9f3356bfdc266d6841d42344dd5dee194b766" + date: "2020-02-14 19:14:51 +0000" + description: "Support schema settings in unit tests" + pr_number: 1793 + scopes: ["cli"] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 8 + deletions_count: 3 + }, { + sha: "497e6bddca534e2ef64de676d24b4ba54709f9d6" + date: "2020-02-08 15:06:56 +0000" + description: "Disable failing CI check" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 6 + }, { + sha: "3233202cd457dd256a052ffe8037361dcac6d236" + date: "2020-02-15 14:12:10 +0000" + description: "Add templating as a domain" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 29 + deletions_count: 22 + }, { + sha: "513851364635457395c81bd38eea13297b8f37ec" + date: "2020-02-15 12:58:34 +0000" + description: "Correct lints and tests" + pr_number: 1789 + scopes: ["windows platform"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 9 + insertions_count: 57 + deletions_count: 31 + }, { + sha: 
"dc8009b78de156c9aed07eccf2498749f69ade4e" + date: "2020-02-15 16:19:31 +0000" + description: "Update README and add templating reference" + pr_number: 1805 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 35 + insertions_count: 601 + deletions_count: 333 + }, { + sha: "612450368e28882ebd31efcd9e2c7e26d3ac69b3" + date: "2020-02-15 16:40:36 +0000" + description: "Fix generate check" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 24 + deletions_count: 24 + }, { + sha: "98623e329ae1796259a85db98af96882a1911ce1" + date: "2020-02-15 17:08:35 +0000" + description: "Update README language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 4 + insertions_count: 8 + deletions_count: 10 + }, { + sha: "c1c529a4397262b60917d825f905fedb97f02434" + date: "2020-02-15 17:09:59 +0000" + description: "Short README bullet point" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "a366c6fa6d33e50aff378d605a245c72d7bfbcae" + date: "2020-02-16 19:23:39 +0000" + description: "Fix typo in `concat` transform example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "a96b7288d17d949a983b5756f3b94ea9d4f60918" + date: "2020-02-16 20:26:15 +0000" + description: "Clarify description of `ansi_stripper` transform" + pr_number: 1809 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "8e06920249f8da33cb23fa2ae76088d4a65ef04d" + date: "2020-02-17 13:02:38 +0000" + description: "Add more behavior tests for transforms" + pr_number: 1811 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 9 + insertions_count: 320 + deletions_count: 0 + }, { + sha: "93a68f0a7e7a0b3b164a5cb69d2a00ba4e50cf8a" + date: "2020-02-17 21:05:22 +0000" + description: "Add field order test at src/event/discriminant.rs" + pr_number: 1822 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 18 + deletions_count: 0 + }, { + sha: "32105959ee83aa160a830570245614fea864d001" + date: "2020-02-17 16:28:39 +0000" + description: "Update to docusaurus 2.0.0-alpha.41" + pr_number: 1831 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 13 + insertions_count: 1599 + deletions_count: 2779 + }, { + sha: "1e85b5bf151ab4ec6f0cab140d79b038aaa02844" + date: "2020-02-18 00:42:28 +0000" + description: "Support advanced `librdkafka` options" + pr_number: 1830 + scopes: ["kafka source"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 207 + deletions_count: 7 + }, { + sha: "4dfc382fdeb4e81c21e4a526c6917070127eec6f" + date: "2020-02-18 00:45:57 +0000" + description: "Support advanced `librdkafka` options" + pr_number: 1829 + scopes: ["kafka sink"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 185 + deletions_count: 2 + }, { + sha: "88963ef85ffcd431ff3f0b66d213831fa1c8ab2d" + date: "2020-02-17 14:54:27 +0000" + description: "Support templating for dynamic values" + 
pr_number: 1799 + scopes: ["add_fields transform"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 5 + insertions_count: 147 + deletions_count: 9 + }, { + sha: "19f06ebee8b35058b7248b381924b8b102028ca2" + date: "2020-02-17 17:55:46 +0000" + description: "Initial `loki` sink" + pr_number: 1783 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 19 + insertions_count: 2106 + deletions_count: 1 + }, { + sha: "5d6dc45f0c3a5c139ba5c2ae345862984e172c7e" + date: "2020-02-18 03:32:37 +0000" + description: "Add more `include_container_names`, `include_pod_uids`, and `include_namespaces` filters" + pr_number: 1501 + scopes: ["kubernetes source"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 1091 + deletions_count: 314 + }, { + sha: "5d97a1e36fd24d7ccfffc1ef5c225892d81e7118" + date: "2020-02-18 11:03:17 +0000" + description: "Ensure zero output event tests fail" + pr_number: 1814 + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 115 + deletions_count: 2 + }, { + sha: "7c3149fc5d989e689a0b5fb94ec03ff0634e2eba" + date: "2020-02-18 11:46:28 +0000" + description: "Update to docusaurus 2.0.0-alpha.43" + pr_number: null + scopes: ["website"] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 102 + deletions_count: 108 + }, { + sha: "b89aded488ca1a9bfeea06cad92365ee99586c66" + date: "2020-02-19 04:28:36 +0000" + description: "Initial `influxdb_metrics` sink implementation" + pr_number: 1759 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Jakub Bednář" + files_count: 16 + insertions_count: 2554 + deletions_count: 3 + }, { + sha: "841a8f8666ead89443dd7c98e8aa4b241537bac6" + date: "2020-02-19 09:08:41 +0000" + description: "Make token optional" + pr_number: 1837 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 54 + deletions_count: 16 + }, { + sha: "a7aa9e4cb29402942b0b97dd28fe25d4ff598aa4" + date: "2020-02-19 15:56:27 +0000" + description: "Add support for option groups and versions " + pr_number: 1859 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 82 + insertions_count: 1155 + deletions_count: 378 + }, { + sha: "f2b4a30db0dacef5be4fded5c36ad4a56a7088ff" + date: "2020-02-19 20:05:22 +0000" + description: "Fix influxdb_metrics common options groups" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 25 + deletions_count: 1 + }, { + sha: "9e6cc795f90c39fdb63819cf1bbbe3648eb71dc1" + date: "2020-02-19 23:04:07 +0000" + description: "only drop source field if JSON parse succeeds" + pr_number: 1863 + scopes: ["json_parser transform"] + type: "fix" + breaking_change: false + author: "Samuel Cormier-Iijima" + files_count: 4 + insertions_count: 33 + deletions_count: 8 + }, { + sha: "799709ac07cc1acb3cd10f142e586cc1af7a241f" + date: "2020-02-20 01:55:18 +0000" + description: "Update logging driver support" + pr_number: 1741 + scopes: ["docker source"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 120 + insertions_count: 342 + deletions_count: 111 + }, { + sha: "93100d4a6ffd67e1ab8c88ab35d61a791b2693f9" + date: "2020-02-20 13:24:22 +0000" + description: "Add transform context" + pr_number: 1855 + 
scopes: ["transforms"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 26 + insertions_count: 120 + deletions_count: 87 + }, { + sha: "2bba79917996a94eab1dee4c742e1ab4c89b1c19" + date: "2020-02-20 14:37:00 +0000" + description: "Inital Kubernetes `WatchClient`" + pr_number: 1746 + scopes: ["transforms"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 490 + deletions_count: 8 + }, { + sha: "657e658117d38f56bdb64b7d9bbbfee554c7d5d5" + date: "2020-02-20 22:07:29 +0000" + description: "Support `Array` and `Map` kinds in `Value`" + pr_number: 1836 + scopes: ["log data model"] + type: "enhancement" + breaking_change: true + author: "Alexander Rodin" + files_count: 3 + insertions_count: 123 + deletions_count: 19 + }, { + sha: "49afdba04d682b48dd91beae49800e590c2ce0ab" + date: "2020-02-20 14:39:54 +0000" + description: "Add min_version to relevant components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 77 + insertions_count: 341 + deletions_count: 171 + }, { + sha: "ead893c445a121b73aee48987a3cce1ca974a303" + date: "2020-02-20 14:49:46 +0000" + description: "Run `make generate`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 39 + deletions_count: 39 + }, { + sha: "269616777cef4466755dfd5a9d80f2ce3a1ac03f" + date: "2020-02-20 14:04:26 +0000" + description: "Initial `gcp_cloud_storage` sink implementation" + pr_number: 1794 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 26 + insertions_count: 2483 + deletions_count: 100 + }, { + sha: "f377e79e472096d6dc2eb91cd7b6e6b2e9e74f03" + date: "2020-02-20 15:47:39 +0000" + description: "Add docker info" + pr_number: 1862 + scopes: ["aws_ec2_metadata transform"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 177 + insertions_count: 325 + deletions_count: 217 + }, { + sha: "cebd0eb183c75872a5bb6f52905e4fde61773423" + date: "2020-02-20 16:04:52 +0000" + description: "Advance to `prod-ready` status" + pr_number: 1875 + scopes: ["cloudwatch_logs sink"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 18 + deletions_count: 16 + }, { + sha: "8f70eff1f8526be3931c97e662d3da9d26821e03" + date: "2020-02-20 15:15:38 +0000" + description: "Re-run `make generate` to fix GCS docs" + pr_number: 1877 + scopes: ["docs"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 61 + insertions_count: 14 + deletions_count: 214 + }, { + sha: "4b0492c8a040b4fe7d6ea71bc6d6a0f2d0bb3cd6" + date: "2020-02-20 16:23:04 +0000" + description: "Set `min_version` to `1.1.54378`" + pr_number: 1879 + scopes: ["clickhouse sink"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 13 + deletions_count: 2 + }, { + sha: "e9c3466ba6beecd30a3c1ffc304d384c7f665592" + date: "2020-02-21 00:36:47 +0000" + description: "Add feature `disable-resolv-conf` to make Vector tests pass on NixOS" + pr_number: 1874 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 9 + deletions_count: 2 + }, { + sha: "fa3fc07d703aa125075739459d72b7a12ca02027" + date: "2020-02-20 17:01:04 +0000" + description: "Clarify `split` transform behavior" + pr_number: null + scopes: [] + type: "docs" + 
breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 18 + deletions_count: 14 + }, { + sha: "4f1b5d884acd577ea8060f2d029c4270da82e4f0" + date: "2020-02-20 19:19:11 +0000" + description: "Fix example" + pr_number: null + scopes: ["split transform"] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 3 + insertions_count: 59 + deletions_count: 45 + }, { + sha: "415429deae6c3d8d287546ab845af3c29bb559f1" + date: "2020-02-20 19:38:50 +0000" + description: "Refer to docker API" + pr_number: 1885 + scopes: ["docker source"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 64 + insertions_count: 77 + deletions_count: 5 + }, { + sha: "f2742dbd5b1d073436fd07af664e38930b36d587" + date: "2020-02-21 11:35:27 +0000" + description: "Allow multiple unit test inputs" + pr_number: 1824 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 376 + deletions_count: 135 + }, { + sha: "3b090aa80416d55679b543cdec3f361aa9869659" + date: "2020-02-21 13:08:34 +0000" + description: "Add `no_outputs_from` field to unit tests" + pr_number: 1817 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 182 + deletions_count: 0 + }, { + sha: "17340e26b0845ed95519027950a0bd9d388df71d" + date: "2020-02-21 16:11:05 +0000" + description: "Use `BTreeMap` instead of `HashMap` for logs and metrics" + pr_number: 1838 + scopes: ["log data model"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 23 + insertions_count: 158 + deletions_count: 121 + }, { + sha: "df8e103b937cc86d9ef12cf6b2638a4003f75b47" + date: "2020-02-21 11:16:29 +0000" + description: "Add description and bio to press page" + pr_number: 1884 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 26 + deletions_count: 1 + }, { + sha: "650ce582e2eab05f585505e5b66197eb672c556c" + date: "2020-02-21 12:26:59 +0000" + description: "Respect Lua types when converting back to events (#857)" + pr_number: 1886 + scopes: ["lua transform"] + type: "fix" + breaking_change: false + author: "Samuel Cormier-Iijima" + files_count: 3 + insertions_count: 78 + deletions_count: 13 + }, { + sha: "e8e0d23db92a6714d45966f0ca5075262e2ca912" + date: "2020-02-21 14:01:52 +0000" + description: "Fixup benches" + pr_number: 1883 + scopes: [] + type: "docs" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 6 + deletions_count: 11 + }, { + sha: "d4c0d54627d3773a44bb3decb3d385d4ec2606af" + date: "2020-02-21 17:07:37 +0000" + description: "Rename to `sematext_logs`" + pr_number: 1871 + scopes: ["sematext_logs sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 10 + insertions_count: 42 + deletions_count: 42 + }, { + sha: "89026d0a9a0dc99022ab116f71ef561995b78c69" + date: "2020-02-22 04:10:13 +0000" + description: "Better multi-line support" + pr_number: 1852 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "MOZGIII" + files_count: 6 + insertions_count: 915 + deletions_count: 103 + }, { + sha: "7c6995401ca2e7f94a9d82cf0137d9dc45b415ba" + date: "2020-02-22 04:46:43 +0000" + description: "File doc fix" + pr_number: 1896 + scopes: [] + type: "docs" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 5 + deletions_count: 0 + }, { + sha: 
"a00295e59ca789f9c1f100a99f6f06c5c772bb84" + date: "2020-02-22 20:32:58 +0000" + description: "Add support for TLS" + pr_number: 1893 + scopes: ["kafka source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 44 + insertions_count: 341 + deletions_count: 122 + }, { + sha: "452dcc8d97790a1edccd60fddd1281a801bb48b1" + date: "2020-02-24 12:20:59 +0000" + description: "Support null values in log events" + pr_number: 1898 + scopes: ["log data model"] + type: "enhancement" + breaking_change: true + author: "Alexander Rodin" + files_count: 8 + insertions_count: 45 + deletions_count: 2 + }, { + sha: "ce1231fda37d50969745ca027b2f17bdd16fe283" + date: "2020-02-24 16:06:03 +0000" + description: "Change `timestamp_key` back to `message_key`" + pr_number: 1887 + scopes: ["kubernetes source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "794e473cc6e1834f28ba2681b6de35f5b346672e" + date: "2020-02-24 07:17:16 +0000" + description: "Initial `rename_fields` transform implementation" + pr_number: 1800 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Ana Hobden" + files_count: 13 + insertions_count: 520 + deletions_count: 201 + }, { + sha: "66d1434e5b46cbc3696fa63b97d3f58f329a9286" + date: "2020-02-24 11:17:49 +0000" + description: "Add multiline examples" + pr_number: 1899 + scopes: ["file source"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 20 + insertions_count: 406 + deletions_count: 198 + }, { + sha: "2984ae6575e1f5dfb034e68dc49e5e7ef7a08bf4" + date: "2020-02-24 19:31:53 +0000" + description: "Reduce RAM usage when running `make test`" + pr_number: 1901 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "9e963e5fea682f5a56dcba936258d6b9c1371a97" + date: "2020-02-24 11:55:17 +0000" + description: "Make note that prod-ready means a stable API" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 2 + insertions_count: 4 + deletions_count: 2 + }, { + sha: "992b4930b9d3052714cf90b66bfa6ead1d7855b2" + date: "2020-02-24 17:13:10 +0000" + description: "Add `timestamp_format` field" + pr_number: 1634 + scopes: ["clickhouse sink"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 185 + deletions_count: 4 + }, { + sha: "0aab35eda9ca3f12fd3deaf31890e5c373e58185" + date: "2020-02-24 18:28:13 +0000" + description: "Nest request fields" + pr_number: null + scopes: ["clickhouse sink"] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 59 + deletions_count: 1 + }, { + sha: "d5ef3cdf0c1cdf3bcc660440ac8a52fae0824309" + date: "2020-02-24 19:38:46 +0000" + description: "Add `swimlanes` transform" + pr_number: 1785 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Ashley Jeffs" + files_count: 21 + insertions_count: 1003 + deletions_count: 179 + }, { + sha: "f6408958fa06d199f0f03789e250828e0b23c6f2" + date: "2020-02-24 20:08:08 +0000" + description: "Fix swimlanes documentation" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 87 + deletions_count: 90 + }, { + sha: "6a2ab77beab26cf266a9f60a3e48289cefa9f4f3" + date: "2020-02-24 17:02:16 
+0000" + description: "Use checksum fingerprinting" + pr_number: 1912 + scopes: ["kubernetes source"] + type: "fix" + breaking_change: false + author: "Samuel Cormier-Iijima" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "84e59e5505ec3756351aa43e334771437555f6fb" + date: "2020-02-25 11:06:25 +0000" + description: "Fix `make check-code`" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 4 + }, { + sha: "11c11c568b1f87f290cab60e70c6d25493eb403d" + date: "2020-02-20 11:22:04 +0000" + description: "Prepare v0.8.0 release" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 16 + insertions_count: 16467 + deletions_count: 9442 + }, { + sha: "bdad2880e86ebcda84d3f126bb62bd21a86ead00" + date: "2020-02-25 14:53:12 +0000" + description: "Relax the requirements for `no_outputs_from` unit test targets" + pr_number: 1921 + scopes: [] + type: "chore" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 8 + deletions_count: 19 + }, { + sha: "856bd2803d6766d335210cc562587fdae3f9a204" + date: "2020-02-25 15:08:26 +0000" + description: "Clean up swimlanes examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 32 + deletions_count: 22 + }, { + sha: "b5ffd12d3cfb8921d44b8b7782de02738e32cf51" + date: "2020-02-25 18:31:10 +0000" + description: "Enable `rdkafka` by default" + pr_number: 1918 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "775dc92674bcdb5b1c6773675e6e88e0cba70ad6" + date: "2020-02-25 18:35:47 +0000" + description: "Fix build with `--no-default-features`" + pr_number: 1920 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "dbb870bd5ceddb64d09f8458e2582cf768a266a3" + date: "2020-02-25 11:20:16 +0000" + description: "Allow kubernetes tests to compile" + pr_number: 1911 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }] + } + "0.4.0": { + date: "2019-09-24" + codename: "Hello World Again" + whats_next: [] + commits: [{ + sha: "1357a3fa6b9acd0dd1d4b9e577969bf0594a5691" + date: "2019-09-12 12:12:12 +0000" + description: "Add initial rework of rate limited logs" + pr_number: 778 + scopes: ["observability"] + type: "perf" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 300 + deletions_count: 270 + }, { + sha: "bf81efdddf801232aa44ab76184e1368f1ce4f78" + date: "2019-09-12 10:38:59 +0000" + description: "Increase docker-release timeout" + pr_number: 858 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "9705ae833c918189f786ac72c6f974102385911b" + date: "2019-09-12 17:32:50 +0000" + description: "New `add_tags` transform" + pr_number: 785 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Alexey Suslov" + files_count: 35 + insertions_count: 1352 + deletions_count: 347 + }, { + sha: "9705ae833c918189f786ac72c6f974102385911b" + date: "2019-09-12 17:32:50 +0000" + description: 
"New `remove_tags` transform" + pr_number: 785 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Alexey Suslov" + files_count: 35 + insertions_count: 1352 + deletions_count: 347 + }, { + sha: "4cd5e539565732fd1289bc9f5ddba2897404f441" + date: "2019-09-11 18:55:02 +0000" + description: "New `file` sink" + pr_number: 688 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Kirill Taran" + files_count: 22 + insertions_count: 1355 + deletions_count: 8 + }, { + sha: "ee527daf254144bdbf78e8aeb87febfb61816bde" + date: "2019-09-11 11:45:30 +0000" + description: "update stream-based diagram" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 206 + deletions_count: 261 + }, { + sha: "ff83f94362d841270c71abbcf415776d0b6e78c3" + date: "2019-09-11 11:33:48 +0000" + description: "update roadmap link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 8 + insertions_count: 13 + deletions_count: 9 + }, { + sha: "e331a886afbf7ce5db4296321449a16bc1ed41e1" + date: "2019-09-11 09:39:43 +0000" + description: "favor older files and allow configuring greedier reads" + pr_number: 810 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 9 + insertions_count: 393 + deletions_count: 70 + }, { + sha: "c3cbc55477c477d7a7b3ff7cd7b216b412ed1c14" + date: "2019-09-11 09:36:09 +0000" + description: "clarify sampler transform rate documentation" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 16 + deletions_count: 6 + }, { + sha: "a7d237573f4b60235b21973a9c3f5c0b9362e03f" + date: "2019-09-10 15:37:23 +0000" + description: "gitbook straight doesnt escape |, so we will have to live with \\|" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "cd4637f154ec1b4e41918516d5ae0bac62bd63e6" + date: "2019-09-10 15:36:11 +0000" + description: "use | for the pipe character...gitbook" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "2f5c8898a2867701242be3691cc9a5f5ec30ba2a" + date: "2019-09-10 15:35:19 +0000" + description: "add SUMMARY.md.erb template" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 90 + deletions_count: 0 + }, { + sha: "049d94e3ba49a869dd30717ef09cf6e8854e1853" + date: "2019-09-10 15:34:57 +0000" + description: "use literals when escaping |" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "ae507430d4e9ff704803f31ef5367319bbfb6497" + date: "2019-09-10 15:33:20 +0000" + description: "gitbook doesnt like double escaped | characters" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "7b25d9170d327c6e2078cad837259b1aad7e5e6e" + date: "2019-09-10 15:30:34 +0000" + description: "fix file source table escaping" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 9 
+ deletions_count: 3 + }, { + sha: "1f488eec08cf518e7199adb05b81d38f3cbb0995" + date: "2019-09-10 12:09:51 +0000" + description: "kafka source has an at_least_once delivery guarantee" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "538b1e789330589cc970166c21daa87b629d3592" + date: "2019-09-10 12:08:39 +0000" + description: "add note about kafka topic pattern matching, ref https://github.com/timberio/vector/issues/819" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 16 + deletions_count: 10 + }, { + sha: "81778163c916b1a94756b19c7313c904fe666721" + date: "2019-09-10 12:05:27 +0000" + description: "fix path detection" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "dbfbf081fcfba499c3cd152b5e2f1b84517f694a" + date: "2019-09-10 10:55:33 +0000" + description: "fix sink links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 26 + deletions_count: 26 + }, { + sha: "de5940ed2592a59a3f86bb5c35b0f019304331d2" + date: "2019-09-10 10:50:48 +0000" + description: "generate SUMMARY.md to ensure new components show up in the side bar" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 69 + deletions_count: 35 + }, { + sha: "33c48a6482ef7b7a5bd11bb4d867a4f97908d93e" + date: "2019-09-10 10:41:48 +0000" + description: "add kafka source to summary.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 11 + deletions_count: 1 + }, { + sha: "e4f5b2630ad9e537b3e576ab73f468855b0f46eb" + date: "2019-09-10 16:36:15 +0000" + description: "Add bundler to requirements" + pr_number: 845 + scopes: [] + type: "docs" + breaking_change: false + author: "Matthias Endler" + files_count: 1 + insertions_count: 7 + deletions_count: 1 + }, { + sha: "94cadda25e552b0eb82e58ea85eda10e6b787197" + date: "2019-09-09 10:22:35 +0000" + description: "Add checkpointing support" + pr_number: 816 + scopes: ["journald source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 342 + deletions_count: 77 + }, { + sha: "c83e7e0c7c3a994c817c4a8ae0ac41c3a6c1818d" + date: "2019-09-05 14:12:25 +0000" + description: "Make the headers and query tables optional."
+ pr_number: 831 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 11 + deletions_count: 5 + }, { + sha: "c8736ea623df8ed17cd04478785522459bd4c105" + date: "2019-09-05 14:27:46 +0000" + description: "Fix docker nightly builds" + pr_number: 830 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 11 + deletions_count: 17 + }, { + sha: "e9b5988bd26c550c2308ba65798872634fe6a4f8" + date: "2019-09-04 17:16:50 +0000" + description: "allow aggregating multiple lines into one event" + pr_number: 809 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 5 + insertions_count: 285 + deletions_count: 16 + }, { + sha: "1b8f2bb9f2b2ec60ba02a0be6f449be19950f8eb" + date: "2019-09-04 21:46:24 +0000" + description: "Topology test refactoring" + pr_number: 748 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Bittrance" + files_count: 3 + insertions_count: 677 + deletions_count: 830 + }, { + sha: "1dac7d8c3e399d750891bbe74fb0580c179e4138" + date: "2019-09-03 15:37:02 +0000" + description: "Add support for unverified HTTPS" + pr_number: 815 + scopes: ["http sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 124 + deletions_count: 28 + }, { + sha: "928e37f4de188134565e05e04943e04dcc95e6a0" + date: "2019-09-03 23:27:30 +0000" + description: "Add missing clickhouse integration test feature" + pr_number: 818 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Markus Holtermann" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "712a7219aeb2e8f4fe87efdbcf11493dc0cb9d97" + date: "2019-09-03 14:12:43 +0000" + description: "Update to `tokio-udp` v0.1.5" + pr_number: 817 + scopes: ["udp source"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 21 + deletions_count: 53 + }, { + sha: "1c6dd7b0b07be08f3c8b794d58d9c0f32c07454f" + date: "2019-08-29 21:18:36 +0000" + description: "Use new UdpFramed" + pr_number: 808 + scopes: ["udp source"] + type: "chore" + breaking_change: false + author: "ktff" + files_count: 3 + insertions_count: 55 + deletions_count: 50 + }, { + sha: "c0f8e78195e88457589d95eaa731a3ab699132d2" + date: "2019-08-27 21:13:06 +0000" + description: "make fingerprinting strategy configurable" + pr_number: 780 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 7 + insertions_count: 330 + deletions_count: 121 + }, { + sha: "2ee1c39c251344bca78caa29824927b2c967ca84" + date: "2019-08-27 19:37:18 +0000" + description: "fix tcp sink docs formatting issues" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 33 + deletions_count: 41 + }, { + sha: "0f72a2b1669a97e4838d3ca852d2f68a878915f4" + date: "2019-08-27 17:29:43 +0000" + description: "Initial `journald` source implementation" + pr_number: 702 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 20 + insertions_count: 1366 + deletions_count: 1 + }, { + sha: "73a092647ef36db3b489a760b75da81cc27ef608" + date: "2019-08-27 15:25:00 +0000" + description: "Add support for TLS" + pr_number: 765 + scopes: ["tcp sink"] + type: "enhancement" + breaking_change: false + author: 
"Bruce Guenter" + files_count: 10 + insertions_count: 460 + deletions_count: 19 + }, { + sha: "4d3d5d5a79ef5124ec8a96acec558b4e63026bcb" + date: "2019-08-27 15:29:58 +0000" + description: "add test for tokenizer handling multiple spaces" + pr_number: null + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 5 + deletions_count: 0 + }, { + sha: "90bded60b2ba5618dbfbed35c7f6ac000ca5a40b" + date: "2019-08-27 14:24:59 +0000" + description: "add build steps as part of overall testing" + pr_number: 788 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "19aef1601e7c2a03b340d2af0b1d4849d9a48862" + date: "2019-08-27 17:42:00 +0000" + description: "`encoding = \"text\"` overrides" + pr_number: 803 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Bittrance" + files_count: 1 + insertions_count: 48 + deletions_count: 13 + }, { + sha: "ed7605a0aeb07e16385907cc56b190345f088752" + date: "2019-08-26 17:38:18 +0000" + description: "Docker build image tweaks" + pr_number: 802 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 28 + deletions_count: 22 + }, { + sha: "15cd77ee9f65bc749ed17cf3673e06ca02d25a2b" + date: "2019-08-26 22:11:32 +0000" + description: "Add new `kafka` source" + pr_number: 774 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Alexander Rodin" + files_count: 16 + insertions_count: 786 + deletions_count: 0 + }, { + sha: "3a57fe52addb3c7f0760437f31518fb9ed8f1bf0" + date: "2019-08-25 19:50:55 +0000" + description: "Use GNU ld instead of LLVM lld for x86_64-unknown-linux-musl" + pr_number: 794 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 26 + deletions_count: 64 + }, { + sha: "5f88b0aa44e1909736a842f9311ae3c54f0d99c2" + date: "2019-08-24 14:21:10 +0000" + description: "update github label links to use new lowercase format" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 32 + insertions_count: 174 + deletions_count: 174 + }, { + sha: "b504f8542a57991b59a7fbd233712afdff172383" + date: "2019-08-24 11:35:50 +0000" + description: "remove sinks guidelines from docs and put them in contributing.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 0 + deletions_count: 56 + }, { + sha: "0296c9e0553b63de1d2e8fe616da12c9233b67db" + date: "2019-08-24 11:30:43 +0000" + description: "merge DEVELOPING.md into CONTRIBUTING.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 286 + deletions_count: 307 + }, { + sha: "e74e4694f5358154b51cbb96475972498f01d426" + date: "2019-08-24 17:32:34 +0000" + description: "Add tags support to log_to_metric transform" + pr_number: 786 + scopes: ["lua transform"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 6 + insertions_count: 127 + deletions_count: 48 + }, { + sha: "af1a700c1b79c542b41a677b14356a1c4c8291fa" + date: "2019-08-24 10:25:52 +0000" + description: "fix relative linking on root docs pages, ref: https://github.com/timberio/vector/pull/793" + pr_number: null + scopes: [] + type: "docs" + 
breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 16 + deletions_count: 12 + }, { + sha: "a02ea63fc70d6b1b2c736e48fc203a09e439305b" + date: "2019-08-24 10:17:16 +0000" + description: "update data model docs with relevant changes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 73 + deletions_count: 33 + }, { + sha: "990714c5b9fce922f05720ab3b84e1aec8b39826" + date: "2019-08-24 06:15:03 +0000" + description: "Restore rust-toolchain after building" + pr_number: 792 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "758d646be0c9f8dd1dca5c997dc57a3541eafcec" + date: "2019-08-23 23:12:49 +0000" + description: "fix source output types" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 7 + insertions_count: 20 + deletions_count: 15 + }, { + sha: "5e7132806adfad922a165a5da0f6c0ac3a5d0854" + date: "2019-08-23 14:44:18 +0000" + description: "update add companies link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 0 + }, { + sha: "65bb96690c29e474d63ab1850ce2904c466a97e5" + date: "2019-08-23 12:20:41 +0000" + description: "add companies list" + pr_number: 789 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 5 + insertions_count: 38 + deletions_count: 4 + }, { + sha: "b6fcdc1176f3586f47f9593294c3fc81c6b08492" + date: "2019-08-23 12:04:07 +0000" + description: "add log/metrics correlation feature" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 10 + deletions_count: 6 + }, { + sha: "761993432a817176ba89ead07b681c36e3b3a1f7" + date: "2019-08-23 09:42:21 +0000" + description: "add namespace config" + pr_number: 782 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 7 + insertions_count: 67 + deletions_count: 22 + }, { + sha: "7e8e7a2417244e58082c855576898d9b5edb1971" + date: "2019-08-21 15:40:44 +0000" + description: "update cloudwatch examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 7 + insertions_count: 22 + deletions_count: 17 + }, { + sha: "38ba67ede2adfb8aba59c161dd47b05a519c1426" + date: "2019-08-21 15:00:40 +0000" + description: "fix authentication list, attempt 2" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "28d62b40b19e56dfaeb851f5313ef937af9d9c79" + date: "2019-08-21 14:59:19 +0000" + description: "fix authentication list" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 20 + deletions_count: 4 + }, { + sha: "6ff75029425ab791f3b63818c74b55481be45139" + date: "2019-08-21 14:29:02 +0000" + description: "fix partitioning language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 11 + deletions_count: 32 + }, { + sha: "48778f848a8b3ee934a28c796d69589eab9b9242" + date: "2019-08-20 16:26:31 +0000" + description: "Only notify on failed/fixed master builds" + 
pr_number: 779 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Jesse Szwedko" + files_count: 1 + insertions_count: 5 + deletions_count: 0 + }, { + sha: "6bbd5c429706b0ca898e9cefb3d44c767adfac61" + date: "2019-08-20 13:52:23 +0000" + description: "fix UDP docs typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "546ba47f692d3deea48a067d419be3c5bda42121" + date: "2019-08-20 13:50:57 +0000" + description: "fix errors in udp source docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 24 + deletions_count: 33 + }, { + sha: "760d21bb84bddb692e7ac31ac8a8a2e0a86784ab" + date: "2019-08-20 13:46:53 +0000" + description: "fix from archive installation typos" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 7 + deletions_count: 7 + }, { + sha: "ff0d46f2236de6dc1bb81c1a9f898a9bf378c484" + date: "2019-08-20 13:43:35 +0000" + description: "keep nightly builds" + pr_number: 772 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 10 + insertions_count: 154 + deletions_count: 108 + }, { + sha: "ab9aff1340786e8bac0ce4b7eeff31ff90e746d7" + date: "2019-08-20 19:22:02 +0000" + description: "add labels support" + pr_number: 773 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 2 + insertions_count: 131 + deletions_count: 57 + }, { + sha: "8a29c615e59e5e8728d08b09bfadc92739aa75ec" + date: "2019-08-19 21:07:51 +0000" + description: "add udp source" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 12 + insertions_count: 485 + deletions_count: 0 + }, { + sha: "c71c421d013d4bd68223982a36d73c3805bb4886" + date: "2019-08-19 21:02:35 +0000" + description: "add clickhouse sink documentation" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 478 + deletions_count: 0 + }, { + sha: "d723a95ce9ff8689635c6bed9b4ec78a1daea81b" + date: "2019-08-19 15:56:29 +0000" + description: "Add support for custom query parameters" + pr_number: 766 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 64 + deletions_count: 5 + }, { + sha: "8361f6a36ce604e39ea124b2864060c5cfa680ae" + date: "2019-08-16 14:55:42 +0000" + description: "Error type for types conversion" + pr_number: 735 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 101 + deletions_count: 42 + }, { + sha: "bed79bbaf9ed5ac566b1765ff989a4cbdd5aefcc" + date: "2019-08-16 13:58:08 +0000" + description: "Initial `clickhouse` sink implementation" + pr_number: 693 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 18 + insertions_count: 698 + deletions_count: 29 + }, { + sha: "a55ed98f2aecce097f7a4e31b424e6ad47a4703e" + date: "2019-08-15 18:47:27 +0000" + description: "Add rust-toolchain file and bump to 1.37" + pr_number: 761 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 27 + insertions_count: 55 + deletions_count: 68 + }, { + sha: 
"252d145caa473a97b93051178b00ddfd7436cc46" + date: "2019-08-15 19:33:42 +0000" + description: "add tags into metrics model" + pr_number: 754 + scopes: ["metric data model"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 8 + insertions_count: 243 + deletions_count: 33 + }, { + sha: "b0d58784a917931f8bdc0e16981bd2ff62108472" + date: "2019-08-15 10:59:10 +0000" + description: "Add guidance for writing healthchecks" + pr_number: 755 + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 116 + deletions_count: 4 + }, { + sha: "7d2427ff1afa2addf29d96b5508133628b1e4e50" + date: "2019-08-15 11:50:40 +0000" + description: "Add dynamic group creation" + pr_number: 759 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 8 + insertions_count: 236 + deletions_count: 27 + }, { + sha: "18abb24e03f1e5ec1613ed44ad1674ba8765361f" + date: "2019-08-15 11:43:59 +0000" + description: "Initial `aws_cloudwatch_metrics` sink implementation " + pr_number: 707 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Alexey Suslov" + files_count: 9 + insertions_count: 588 + deletions_count: 5 + }, { + sha: "60fe033ae52bf2fd7558b17037d37c9e236a02d1" + date: "2019-08-14 17:05:21 +0000" + description: "fix docs generator file ext" + pr_number: null + scopes: ["docs"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "0d0fcdfb226394ca6f26f55cd24785cc948f49d7" + date: "2019-08-14 15:40:06 +0000" + description: "Add support for additional headers to the Elasticsearch sink" + pr_number: 758 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 62 + deletions_count: 1 + }, { + sha: "37c998922a2a8ae96d17e82e6fd56c41679c66f8" + date: "2019-08-14 22:47:44 +0000" + description: "Update Metric::Set usage" + pr_number: 756 + scopes: ["prometheus sink"] + type: "fix" + breaking_change: false + author: "ktff" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "756b115fe4db5e81358c61f88444c87010ec9268" + date: "2019-08-14 22:19:27 +0000" + description: "Initial `udp` source implementation" + pr_number: 738 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "ktff" + files_count: 3 + insertions_count: 230 + deletions_count: 1 + }, { + sha: "014d6f63044476c541f9f3f0f9f1092e2446ca05" + date: "2019-08-14 22:16:35 +0000" + description: "Support sets" + pr_number: 733 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "ktff" + files_count: 2 + insertions_count: 178 + deletions_count: 6 + }, { + sha: "584196c14caa150bc97edc39e339976c2927cd1e" + date: "2019-08-14 16:50:47 +0000" + description: "reload with unparseable config" + pr_number: 752 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kirill Taran" + files_count: 2 + insertions_count: 14 + deletions_count: 8 + }, { + sha: "a2196b89075bbd71c82340bcab607a8eca72d1dc" + date: "2019-08-13 16:02:21 +0000" + description: "Add HTTP Basic authorization" + pr_number: 749 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 105 + deletions_count: 5 + }, { + sha: "fdc863ce7f757c75a277818195fdbfe170963765" + date: "2019-08-13 11:19:15 +0000" 
+ description: "Ignore topology replace source and transform" + pr_number: 740 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "dd99c6cdf430f86285856ead0d75c2e1dab4f104" + date: "2019-08-12 20:15:05 +0000" + description: "fix typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 28 + insertions_count: 28 + deletions_count: 28 + }, { + sha: "59f3a185cdc9038b2cdc78c027239f6f241e03e9" + date: "2019-08-12 15:27:11 +0000" + description: "Hot fix (cargo-fmt)" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Kirill Taran" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "2b8c1cdcaa5fd577770a8a5cf63fb60d4c7b50d7" + date: "2019-08-12 17:12:06 +0000" + description: "Validation of sinks and sources for non-emptiness." + pr_number: 739 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kirill Taran" + files_count: 2 + insertions_count: 14 + deletions_count: 2 + }, { + sha: "f59065106149213c6227b7542c7b9e46f9caf119" + date: "2019-08-12 11:08:34 +0000" + description: "fix typos" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 8 + insertions_count: 8 + deletions_count: 8 + }, { + sha: "f23553a792214649acf091d2b71a23c837acee9f" + date: "2019-08-12 16:48:17 +0000" + description: "Fix typo in vector image" + pr_number: 744 + scopes: [] + type: "docs" + breaking_change: false + author: "Matthias Endler" + files_count: 1 + insertions_count: 1 + deletions_count: 134 + }, { + sha: "9a2f2b1e25699b9083990cf32d1e13582de6455b" + date: "2019-08-12 15:17:18 +0000" + description: "Fix typos" + pr_number: 743 + scopes: [] + type: "docs" + breaking_change: false + author: "Matthias Endler" + files_count: 16 + insertions_count: 30 + deletions_count: 29 + }, { + sha: "95a19e1f9c28fdcb4ba1c337d935cabb5a29b176" + date: "2019-08-10 18:34:16 +0000" + description: "Improve x86_64-unknown-linux-musl build" + pr_number: 722 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 5 + deletions_count: 2 + }, { + sha: "adf0f1f5cc1828fd2be012d2487bc64caa748de3" + date: "2019-08-09 21:04:56 +0000" + description: "It is now possible to reload a with a non-overlap…" + pr_number: 681 + scopes: ["topology"] + type: "fix" + breaking_change: false + author: "Bittrance" + files_count: 2 + insertions_count: 63 + deletions_count: 3 + }, { + sha: "febdde0419fd7665916ea76bfb310ec1ad805c41" + date: "2019-08-09 13:56:39 +0000" + description: "Add sink healthcheck disable" + pr_number: 731 + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 22 + insertions_count: 531 + deletions_count: 125 + }, { + sha: "3a2990c4da5aef70caa106f4d7382dcf3fc1ec1e" + date: "2019-08-09 11:25:50 +0000" + description: "update sink flow diagrams" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 25 + deletions_count: 27 + }, { + sha: "db6829d7da7e7a3ffdf6086cadc1beb3455c79ce" + date: "2019-08-08 16:35:00 +0000" + description: "fix release-s3 error" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: 
"96b1e89bf47929edd361baf4f4da34ff40a5c8a8" + date: "2019-08-08 23:25:46 +0000" + description: "add timestamps into metrics" + pr_number: 726 + scopes: ["metric data model"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 11 + insertions_count: 335 + deletions_count: 195 + }, { + sha: "f22e3af44256d2c07b9f6fcc5369f94f7c405dd4" + date: "2019-08-09 05:27:17 +0000" + description: "don't serialize MapValue::Null as a string" + pr_number: 725 + scopes: ["log data model"] + type: "fix" + breaking_change: false + author: "Markus Holtermann" + files_count: 2 + insertions_count: 30 + deletions_count: 4 + }, { + sha: "5a7d1516c5c08cee44cc84043db10a8253380407" + date: "2019-08-08 11:41:13 +0000" + description: "RUSTSEC-2019-0011 by updating crossbeam-epoch" + pr_number: 723 + scopes: ["security"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 20 + deletions_count: 5 + }, { + sha: "84f87eaf788b61d07bb989410e7e74948f75ee12" + date: "2019-08-07 22:33:00 +0000" + description: "remove filter on nightly builds" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "e267f72beda5092984b0f6b4c92fb785037419b9" + date: "2019-08-07 19:25:24 +0000" + description: "add prometheus histograms test" + pr_number: 719 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexey Suslov" + files_count: 2 + insertions_count: 39 + deletions_count: 22 + }, { + sha: "1bf385dea6f4d7a02aebc9c3cc010defe5d56277" + date: "2019-08-07 12:06:55 +0000" + description: "Use a locked down version of localstack" + pr_number: 720 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "e4108bc1b067ac83aa0dc85fcab9564af75367ef" + date: "2019-08-07 18:02:14 +0000" + description: "use double for storing metric values" + pr_number: 717 + scopes: ["metric data model"] + type: "chore" + breaking_change: false + author: "Alexey Suslov" + files_count: 4 + insertions_count: 18 + deletions_count: 23 + }, { + sha: "3c36de2691c263dfbbda747d3d328b445ec174ff" + date: "2019-08-06 14:19:26 +0000" + description: "use shorter component ids" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 30 + insertions_count: 47 + deletions_count: 47 + }, { + sha: "855b00793cd4b2cee35788a020d1e729a02b5005" + date: "2019-08-06 19:43:06 +0000" + description: "Support histograms" + pr_number: 675 + scopes: ["prometheus sink"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 18 + insertions_count: 742 + deletions_count: 596 + }, { + sha: "bedccf409b61c7eeaa9d96126fed184ff0df27fe" + date: "2019-08-06 11:42:04 +0000" + description: "all new * as a commit title category" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 12 + deletions_count: 0 + }, { + sha: "495d8be6299de55d5e31a84cfe467f263582d9df" + date: "2019-08-05 23:41:36 +0000" + description: "fix duplicate section references" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 13 + insertions_count: 145 + deletions_count: 18 + }, { + sha: "929026eb7bdeea9459ab81324124f46b85674c78" + date: "2019-08-05 23:21:07 +0000" + description: "aws_s3_sink 
encoding is not required" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 184 + deletions_count: 189 + }, { + sha: "a9993b8e1aa6557be5ddef47cc3d305fe0a50a56" + date: "2019-08-05 23:06:48 +0000" + description: "add valid scopes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 63 + deletions_count: 2 + }, { + sha: "9b7b786d74e1ad57322ae7f5e3ec5bcd2073d9cf" + date: "2019-08-05 22:37:34 +0000" + description: "fix typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 11 + insertions_count: 40 + deletions_count: 40 + }, { + sha: "4c0046c54cd7bef189a7a9422f3b7608ecb17ebd" + date: "2019-08-05 22:36:17 +0000" + description: "remove false default values that communicate dynamic behavior" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 16 + insertions_count: 136 + deletions_count: 96 + }, { + sha: "67bbaa52e35e1736f383f995a0399c9960d34a24" + date: "2019-08-05 22:18:02 +0000" + description: "fix html escaping issues" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 33 + insertions_count: 248 + deletions_count: 248 + }, { + sha: "906bb21fe220d827b0fbc018d223bda0792d6006" + date: "2019-08-05 22:14:20 +0000" + description: "add html escaping" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 37 + insertions_count: 326 + deletions_count: 281 + }, { + sha: "e190e96e925d819d7460fab64f37fdb4241b88ad" + date: "2019-08-06 03:48:52 +0000" + description: "fall back to global data_dir option (#644)" + pr_number: 673 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Denis Andrejew" + files_count: 14 + insertions_count: 191 + deletions_count: 50 + }, { + sha: "ce3bc8dd988539643df3f5a6447696a7ebac108f" + date: "2019-08-04 11:36:35 +0000" + description: "fix lua drop event example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 10 + }, { + sha: "bb4e220318db02c9d52703d7a603f70b9731473d" + date: "2019-08-04 11:32:10 +0000" + description: "fix alternative suggestions" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 32 + insertions_count: 70 + deletions_count: 59 + }, { + sha: "7f9a86f5de31a3d2b17dc8a22b7ae27420eceed6" + date: "2019-08-04 11:25:06 +0000" + description: "update log_to_metric docs to reflect all metric types" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 7 + insertions_count: 284 + deletions_count: 58 + }, { + sha: "b317396794f97d66208379f2cfffe007ac1a51fa" + date: "2019-08-04 10:15:41 +0000" + description: "update enum language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 37 + insertions_count: 239 + deletions_count: 230 + }, { + sha: "86fef80098e7e438852917dbcd58eec0e8e8ac44" + date: "2019-08-03 15:59:09 +0000" + description: "add summary for Vector config syntax" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 6 + deletions_count: 2 + }, { + sha: "d391ad95a259cab216f4848f7c938a612749e043" 
+ date: "2019-08-03 15:57:52 +0000" + description: "fix template syntax broken link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 16 + deletions_count: 16 + }, { + sha: "e37229641eac91eee4c9699b34ae821ad8548ada" + date: "2019-08-03 15:56:28 +0000" + description: "fix doc typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "5406afbf6d5632be9202d83a4d1055c91d022549" + date: "2019-08-03 15:55:56 +0000" + description: "remove strftime comment in s3 key_prefix description" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "7fea8ad0876a90a7b6bdc3e14f686978b5d109f3" + date: "2019-08-03 15:52:47 +0000" + description: "add documentation on Vector's template syntax" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 43 + insertions_count: 2601 + deletions_count: 137 + }, { + sha: "57d57db514a0de09dcbd4f98405c9b9a26b1c027" + date: "2019-08-03 13:15:08 +0000" + description: "fix build syntax error" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "5eb88c35797cebf49e1ede178b94598a0afdd5eb" + date: "2019-08-03 13:13:29 +0000" + description: "fix nightly builds, release to docker and s3" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 128 + deletions_count: 71 + }, { + sha: "4b066e6d7f40fd3cc6d967bfc527c0c8aa8c3718" + date: "2019-08-03 11:15:11 +0000" + description: "cleanup docker language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 28 + deletions_count: 19 + }, { + sha: "b8ee40ead6b03b23f11940f2038dd0c10580e48b" + date: "2019-08-03 11:06:08 +0000" + description: "update installer script to use musl statically linked archive" + pr_number: null + scopes: ["chore"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "7e161e6aa92f20e3b758fb0a87889fa05346ab18" + date: "2019-08-03 11:02:01 +0000" + description: "update chat to chat/forum since it serves both purposes now" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 30 + insertions_count: 31 + deletions_count: 31 + }, { + sha: "02cfaa1e6a78f08d0eba93cdb15a6049940f7d8a" + date: "2019-08-03 10:58:27 +0000" + description: "add data model diagram" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 216 + deletions_count: 7 + }, { + sha: "ecdf4ed715901b8a9c132b57df120b5bdf1a2f63" + date: "2019-08-02 18:31:21 +0000" + description: "fix docker html entity escaping" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 12 + deletions_count: 6 + }, { + sha: "b511fd9362421e6ac3b73187a8ac1f61ea309501" + date: "2019-08-02 18:29:23 +0000" + description: "update vector docker images to reflect their base image" + pr_number: 705 + scopes: ["operations"] + type: "chore" + 
breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 34 + deletions_count: 32 + }, { + sha: "8fe6b2252bfe7bf20f17327a46771742eb80396c" + date: "2019-08-02 16:40:06 +0000" + description: "use templates for ES index and S3 key prefix" + pr_number: 686 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 275 + deletions_count: 151 + }, { + sha: "fbed6bdddddc627f6400bf36a075fcd897a8b09a" + date: "2019-08-02 17:24:18 +0000" + description: "unflatten event before outputting" + pr_number: 678 + scopes: ["log data model"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 17 + insertions_count: 552 + deletions_count: 40 + }, { + sha: "12ce3a069bbc809a08d5561ddbd4593c318b9960" + date: "2019-08-02 17:16:13 +0000" + description: "recommend alpine docker image" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "fb9595e5abc2072ec454c2e84e59a056cab5d65b" + date: "2019-08-02 16:32:51 +0000" + description: "attempt to fix data model type links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "f07e3ce41cea2492114e8b20d35f703843a825ac" + date: "2019-08-02 16:01:55 +0000" + description: "singularize log event types" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "0d19588b28bab2f2c44760e610516c5ce17ad6b4" + date: "2019-08-02 15:58:47 +0000" + description: "expand on log event types" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 21 + insertions_count: 66 + deletions_count: 32 + }, { + sha: "8758a7c978fa62fe88960f4dd10ebc17a604a743" + date: "2019-08-02 15:42:02 +0000" + description: "fix subnav item names for log and event" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "093bf3f5e7054cbfd58a4443a454e7532a8f844e" + date: "2019-08-02 15:41:20 +0000" + description: "fix path typo in subnav" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "e57120ab88d37b817a207588bca03d377e9c94b0" + date: "2019-08-02 15:40:31 +0000" + description: "rename log and metric subnav items because Gitbook..." + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "9f2e7dda97814710e62fe8d1cf07ff2ee7d4ccf4" + date: "2019-08-02 15:39:48 +0000" + description: "rename log and metric event titles because gitbook..." 
+ pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "69a1c145621cc0a922f059f340d3fcd28938631b" + date: "2019-08-02 15:37:59 +0000" + description: "add log and metrics subnav items for the data model section" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "05a2aecb33dd95e1b1e99f923767b2e40b082339" + date: "2019-08-02 15:34:46 +0000" + description: "Add configurable partition keys" + pr_number: 692 + scopes: ["aws_kinesis_streams sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 54 + insertions_count: 1075 + deletions_count: 522 + }, { + sha: "e70d1834e34c58ae0e31b630f3a148d5ed3c64d4" + date: "2019-08-02 10:44:50 +0000" + description: "cleanup musl archive language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 10 + }, { + sha: "a9ec4a75d753f6e939df4b31b9d3ba8f700ff890" + date: "2019-08-02 10:35:15 +0000" + description: "release nightly instead of on each commit" + pr_number: 703 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 47 + deletions_count: 37 + }, { + sha: "bce95689d801b35a635328f9524613da3b137b39" + date: "2019-08-02 09:24:55 +0000" + description: "remove musl warnings since it includes all features now" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "c8a304590fdced8b867a8b3d1d44b86c67dd0bfb" + date: "2019-08-01 18:54:58 +0000" + description: "fix broken links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 2 + deletions_count: 5 + }, { + sha: "4b0735f5b64da0c7d6aba1a15d803d1767048fe4" + date: "2019-08-01 18:34:07 +0000" + description: "fix docker.md parsing error" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "c3345f5da237fcfb94caccdd88ab0adfb7e333eb" + date: "2019-08-01 18:14:05 +0000" + description: "Add rate limit notice when it starts" + pr_number: 696 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 40 + deletions_count: 11 + }, { + sha: "df3df71d2b9c1b2f53f2590bc5bb0c1a639ff1c4" + date: "2019-08-01 18:00:03 +0000" + description: "make binary stripping an option during the release process, fixes an issue stripping armv7 binaries" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 6 + deletions_count: 1 + }, { + sha: "22f8454d4b70496262f57e3f4e4232768fc30ebd" + date: "2019-08-01 17:24:05 +0000" + description: "add TARGET env var to musl build archive step" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "88726cb21b0c4284373cfd12ce1b230d307e8a07" + date: "2019-08-01 13:35:34 +0000" + description: "Remove extra debug flags" + pr_number: null + scopes: [] + type: "chore" + 
breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "c47d9cd610befbede9846c61437be748884f4c46" + date: "2019-08-01 13:35:09 +0000" + description: "Fix build-archive script to support multiple features" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 23 + deletions_count: 10 + }, { + sha: "feca20d2ba5cba4c88bef431a1ec4988ba26f6c9" + date: "2019-08-01 11:57:01 +0000" + description: "Disable armv7 musleabihf build" + pr_number: 698 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 19 + deletions_count: 19 + }, { + sha: "d2df9ba321990a0bf5996f18135351fa8bbf296c" + date: "2019-08-01 18:52:16 +0000" + description: "Build for x86_64-unknown-linux-musl with all features and optimized binary size" + pr_number: 689 + scopes: ["operations"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 365 + deletions_count: 20 + }, { + sha: "e37995eec33941545694d8c9d8b784f081c4c785" + date: "2019-08-01 11:18:23 +0000" + description: "remove Slack since we no longer use Slack" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "0ae355f76c66147032b6ef5e4bdab141bfd2eeef" + date: "2019-08-01 11:17:20 +0000" + description: "update documentation to reflect new help resources" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 37 + insertions_count: 208 + deletions_count: 82 + }, { + sha: "57bc070a11ef3141ee5829d043f3720e359da726" + date: "2019-08-01 11:07:09 +0000" + description: "Retry requests on timeouts" + pr_number: 691 + scopes: ["networking"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 146 + deletions_count: 20 + }, { + sha: "9675b5197d60d3ff6a3ddd81cd9b4ec08bc92576" + date: "2019-08-01 11:06:41 +0000" + description: "Default `doc_type` to `_doc` and make it op…" + pr_number: 695 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "9ec1c644e82b029b943a1017f8176e77b1e494bd" + date: "2019-07-31 16:12:16 +0000" + description: "remove forum references, we recommend filing a help issue or joining our Slack channel instead" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 34 + insertions_count: 33 + deletions_count: 97 + }, { + sha: "05032c6803bf1d45eaf2372a58d46fadaa9646bb" + date: "2019-07-26 12:01:25 +0000" + description: "Add retry ability to cloudwatch" + pr_number: 663 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 24 + insertions_count: 605 + deletions_count: 355 + }, { + sha: "df6816f2432039236ba14361262012380b8f5c82" + date: "2019-07-26 16:53:55 +0000" + description: "replace some references to \"sink\" with `component.type`" + pr_number: 685 + scopes: [] + type: "docs" + breaking_change: false + author: "Denis Andrejew" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "89a32737baa90f36de69da19fe95ba6734283368" + date: "2019-07-25 15:53:32 +0000" + description: "Update nom from 
0.5.0-beta2 to 0.5" + pr_number: 679 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 21 + deletions_count: 7 + }, { + sha: "6670fc00c576788fecb9e7f8321f76f2dc08eb6f" + date: "2019-07-25 16:41:31 +0000" + description: "minor fixes in getting-started page" + pr_number: 682 + scopes: [] + type: "docs" + breaking_change: false + author: "Cédric Da Fonseca" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "1fbd6a4eead61518d8678ca39b6baadbbec30314" + date: "2019-07-24 09:56:17 +0000" + description: "use templates for metric names in log_to_metric" + pr_number: 668 + scopes: ["log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 141 + deletions_count: 58 + }, { + sha: "f1dfaf90512f3ea8a8a0bee743bfb297b08657df" + date: "2019-07-23 13:24:31 +0000" + description: "add coercer transform" + pr_number: 666 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 17 + insertions_count: 689 + deletions_count: 20 + }, { + sha: "2ecb9897b3d469a0eb0c180db9ba371cde87443b" + date: "2019-07-22 13:49:30 +0000" + description: "Use multi-stage builds for vector-slim Docker image" + pr_number: 672 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 11 + deletions_count: 2 + }, { + sha: "bd22713c4b544b82b56b079bd2ef7411af951226" + date: "2019-07-22 13:44:46 +0000" + description: "fix broken build process" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 4 + deletions_count: 4 + }, { + sha: "01458f4e5764e6d06ca04b3a569eeb767ac58eee" + date: "2019-07-22 20:21:26 +0000" + description: "fix spelling in READMEs" + pr_number: 671 + scopes: [] + type: "docs" + breaking_change: false + author: "Brian Kabiro" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "6c47a6716206d066191d4e67d810df0f7f761c96" + date: "2019-07-22 13:13:53 +0000" + description: "build x86_64-unknown-linux-musl with all features" + pr_number: 669 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 54 + deletions_count: 27 + }, { + sha: "0a4ef9774092eef2d9d48ec7167b73d46caf464a" + date: "2019-07-21 10:05:28 +0000" + description: "update batch_timeout unit to seconds across all docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 9 + insertions_count: 28 + deletions_count: 24 + }, { + sha: "a69668faab8c759e40377e696e5750f6bc58f244" + date: "2019-07-21 09:16:01 +0000" + description: "add support for armv7 releases, both gnueabihf and musleabihf" + pr_number: 662 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 84 + deletions_count: 4 + }, { + sha: "d6f3a1a4c2f8da71b950725f7bb164f526c12386" + date: "2019-07-19 11:10:53 +0000" + description: "switch batch_timeout from bytes to seconds" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "6812ca83f8de0a5c2bd6d131f3c7026b2a223d57" + date: "2019-07-19 18:10:09 +0000" + description: "Use correct units in example batch timeouts" + pr_number: 664 + scopes: [] + type: 
"docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 12 + insertions_count: 12 + deletions_count: 12 + }, { + sha: "b4575e662c5d06eb52d43678c6031d095bfa06de" + date: "2019-07-18 14:23:44 +0000" + description: "reusable templating system for event values" + pr_number: 656 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 162 + deletions_count: 64 + }, { + sha: "075e1cca2744e3fb868e852236345c484ae4973e" + date: "2019-07-18 14:34:36 +0000" + description: "add timberio/vector-alpine docker image" + pr_number: 659 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 86 + deletions_count: 5 + }, { + sha: "e3e31d04d87513f21083a094d90b79b358ed4cd8" + date: "2019-07-18 10:18:54 +0000" + description: "remove labels support from log_to_metric" + pr_number: 657 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 20 + deletions_count: 36 + }, { + sha: "32d2f6ba6d47f5c7f4c031dc25a7026edf4f869d" + date: "2019-07-18 09:52:43 +0000" + description: "push Histogram and Set metrics from logs" + pr_number: 650 + scopes: ["log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 155 + deletions_count: 68 + }, { + sha: "75f05f4626323cb47cdfbf6caf6ca0030f500f15" + date: "2019-07-17 17:03:11 +0000" + description: "retry HttpDispatch errors for s3 and kinesis" + pr_number: 651 + scopes: ["aws_s3 sink"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "c7654ce407fc525a22f0fa4b5a5fa949bb4247de" + date: "2019-07-17 16:26:30 +0000" + description: "rename call when releasing to latest and edge channels in s3" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "5099d14e6f809235e87f0ee95737ea7e67a5a8b6" + date: "2019-07-17 16:03:29 +0000" + description: "add support for x86_64-unknown-linux-musl releases" + pr_number: 654 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 10 + insertions_count: 51 + deletions_count: 41 + }, { + sha: "4e1e9e21b71a9ccdc38a38d51b9727f332721f05" + date: "2019-07-17 15:18:07 +0000" + description: "Update smallvec to `v0.6.10`" + pr_number: 652 + scopes: ["tech debt"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "1f2319f9b49260951824bce7c3d75548347f1d2a" + date: "2019-07-17 15:04:24 +0000" + description: "Add `jemallocator` feature flag" + pr_number: 653 + scopes: ["operations"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 3 + deletions_count: 2 + }, { + sha: "1ea7e30d460f7f00be6d138f0d875ed8efbb0904" + date: "2019-07-17 10:36:59 +0000" + description: "add test around min file size for fingerprinting" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 41 + deletions_count: 1 + }, { + sha: "fc93a801ba5ae8ae90132727f3ad194691b6bfb0" + date: "2019-07-16 21:49:20 +0000" + description: "accept both logs and metrics" + pr_number: 631 + scopes: ["console sink"] + 
type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 31 + insertions_count: 600 + deletions_count: 318 + }, { + sha: "33489984d28285740d26dcd2bc3183dfafb9711f" + date: "2019-07-16 22:02:24 +0000" + description: "Refactor metrics sampling, rename Timer to Histogram" + pr_number: 648 + scopes: ["metric data model"] + type: "chore" + breaking_change: false + author: "Alexey Suslov" + files_count: 6 + insertions_count: 96 + deletions_count: 81 + }, { + sha: "fddfbe83ee89a890662872a6a614c8213da8d37b" + date: "2019-07-16 13:01:23 +0000" + description: "add type coercion" + pr_number: 632 + scopes: ["grok_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 12 + insertions_count: 322 + deletions_count: 78 + }, { + sha: "a308ed2744bddf9f2b4b2607fec40800c622bd7b" + date: "2019-07-15 22:59:20 +0000" + description: "test thread usage to ensure tests pass on all machines" + pr_number: 646 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 40 + deletions_count: 36 + }, { + sha: "59592d3a1e62169ffe934c7773c4ebc3d6392630" + date: "2019-07-15 17:54:27 +0000" + description: "add convetional commits to contributing" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 12 + deletions_count: 1 + }, { + sha: "2a07d727b2acc57dd72746356dd5ad0284b23208" + date: "2019-07-15 17:51:36 +0000" + description: "add AWS env vars" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 9 + deletions_count: 7 + }, { + sha: "92dfdca8f99986961d4eb66ce480770700ee1994" + date: "2019-07-15 17:47:38 +0000" + description: "add exit codes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 17 + deletions_count: 0 + }, { + sha: "0cd9c302dfbd37f320e56ac385801af6bdf18404" + date: "2019-07-15 17:40:12 +0000" + description: "Add validating page for administration docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 49 + deletions_count: 2 + }, { + sha: "81b9e4f06a8fe6b0ce5f3592921d6bebea7aa85f" + date: "2019-07-15 16:46:33 +0000" + description: "Add docs about file checkpointing" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 31 + deletions_count: 8 + }, { + sha: "b3d1a767b46302dc8d698812b960afce23c511b2" + date: "2019-07-15 16:33:18 +0000" + description: "Add reference to glob_minimum_cooldown option" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 5 + deletions_count: 3 + }, { + sha: "b420159287f19a2aa4405da6f90fcea733d9de28" + date: "2019-07-15 16:23:48 +0000" + description: "Fix Github labels query param" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 14 + insertions_count: 17 + deletions_count: 13 + }, { + sha: "7fa2515374f55848e128deb50e153488c9fe330f" + date: "2019-07-15 16:18:43 +0000" + description: "Fix sampler rate example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 5 + deletions_count: 5 + }, { + sha: 
"de2f4b3a9f845f57b6e5d40342e8f4a64639f91d" + date: "2019-07-15 16:17:41 +0000" + description: "Add component context section" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 22 + insertions_count: 135 + deletions_count: 85 + }, { + sha: "dd48943579fe07525aa2f93a7ecf357617d54194" + date: "2019-07-15 15:43:12 +0000" + description: "Add fingerprint options for file source to docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 95 + deletions_count: 31 + }, { + sha: "f8aeff54adf9aa46175a98b5211705393d0c4c20" + date: "2019-07-15 15:34:50 +0000" + description: "Add sampler transform to summary.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "fe54f1e9d28ea18c94063170819c2fced8397a26" + date: "2019-07-15 15:33:43 +0000" + description: "Add glob_minimum_cooldown option to file source docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 29 + deletions_count: 0 + }, { + sha: "1a4229093e15452f2c378a81e448ce85167709f3" + date: "2019-07-15 15:04:19 +0000" + description: "Use one consistent env var syntax" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 31 + insertions_count: 67 + deletions_count: 67 + }, { + sha: "0d0c9d62f2f737359331cc2a52d988850552f0fc" + date: "2019-07-15 20:41:33 +0000" + description: "Improve configuration validation and make it more strict" + pr_number: 552 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kirill Taran" + files_count: 17 + insertions_count: 236 + deletions_count: 156 + }, { + sha: "524355cde9009936fe5eeae0a85315bd3405dc94" + date: "2019-07-15 14:37:21 +0000" + description: "Add semtantic.yml to only check PR titles" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "1176821bd86431ef8cf0b9db763a85828c3116c7" + date: "2019-07-15 11:53:41 +0000" + description: "Use the proper type in the blackhole example" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "20f678c0d96ce9ad282abc34d30a23ce13f63a97" + date: "2019-07-14 17:56:04 +0000" + description: "Add doc sections for all sink egress methods" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 19 + insertions_count: 259 + deletions_count: 215 + }, { + sha: "f1e0938c5ef508dda26005e567d6aaab6eabe0ab" + date: "2019-07-14 20:42:28 +0000" + description: "Fix argument type" + pr_number: 639 + scopes: [] + type: "chore" + breaking_change: false + author: "Ayhan" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "0db4e693ec618ea21f5273c85c0810f15973353d" + date: "2019-07-13 08:51:14 +0000" + description: "Batch diagram language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "472bd3574089994d464e1b91746bfc35a382e934" + date: "2019-07-13 08:49:50 +0000" + description: "Fix authentication formatting" + pr_number: null + scopes: [] + type: "docs" + 
breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 12 + deletions_count: 24 + }, { + sha: "5746533135b33aae4b35aee5feb169ade0284810" + date: "2019-07-13 08:43:59 +0000" + description: "Fix config example headers for transforms and sources" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 34 + insertions_count: 188 + deletions_count: 108 + }, { + sha: "4b76ae8e2dba91dd0943aa7947325c8ed2b7cdf4" + date: "2019-07-13 08:30:34 +0000" + description: "Add relevance text to options table" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 19 + insertions_count: 99 + deletions_count: 134 + }, { + sha: "150994527180a69eb848fffa9a810d7fe376d2d1" + date: "2019-07-13 08:23:08 +0000" + description: "Add relevant when... tag for options that depend on other options in docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 17 + insertions_count: 40 + deletions_count: 25 + }, { + sha: "fcbf1aef0eee29bc3a36f2cce7e5ab2387a0acb7" + date: "2019-07-13 07:42:55 +0000" + description: "Fix environment variable language in docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 28 + insertions_count: 95 + deletions_count: 92 + }, { + sha: "419b2f0f73c89d81eb636cb8af43a52489fca3cb" + date: "2019-07-12 16:51:55 +0000" + description: "Update grok_parser language" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 16 + deletions_count: 18 + }, { + sha: "94d6838e901d745ada07cc62649dbbc3cef52bcb" + date: "2019-07-12 13:12:16 +0000" + description: "Add examples to the add_fields docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 31 + deletions_count: 22 + }, { + sha: "c4976fd7d54d23cff1595d1de183ce04ba81153a" + date: "2019-07-12 10:34:05 +0000" + description: "Fix section references for fields that include Regex special characters" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 7 + insertions_count: 8 + deletions_count: 7 + }, { + sha: "c98f455cfc7dda43b2f09d5804134b3832ae3153" + date: "2019-07-12 09:44:16 +0000" + description: "Link to log data model in add fields docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 5 + deletions_count: 2 + }, { + sha: "a95201a3c1fe73f7d250f313fe786458bc9aa880" + date: "2019-07-12 09:42:16 +0000" + description: "Add default envirnoment variables section" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 33 + insertions_count: 317 + deletions_count: 73 + }, { + sha: "934011d78f8fc92bfff922a61bb0bf0269ad0ac7" + date: "2019-07-12 12:09:33 +0000" + description: "Fix cloudwatch test by dropping sink" + pr_number: 626 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 6 + deletions_count: 2 + }, { + sha: "0fb311fbce5d4a304d82e50e186fd03636bf1c44" + date: "2019-07-10 08:15:13 +0000" + description: "Fix add_fields transform docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 64 + deletions_count: 136 + }, { + 
sha: "0820c1087f9c524d55a96f726a56afd09c2f0069" + date: "2019-07-12 10:08:40 +0000" + description: "Add File checkpoint feature." + pr_number: 609 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "apjones-proton" + files_count: 6 + insertions_count: 539 + deletions_count: 325 + }, { + sha: "4a88262f95ace846b60d4ebe2857d1c1d3170bbe" + date: "2019-07-11 17:24:12 +0000" + description: "Back out change to dash handling" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 3 + deletions_count: 7 + }, { + sha: "d8a8e961a35d2eb7dadf183a69f214a4637a47b0" + date: "2019-07-11 17:36:24 +0000" + description: "Add cloudwatch partitioning and refactor partition buffer" + pr_number: 519 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 7 + insertions_count: 656 + deletions_count: 211 + }, { + sha: "e93621195a390383ae5fec131f2e01874ea842d8" + date: "2019-07-11 17:32:52 +0000" + description: "Add `--color` option and tty check for ansi colors" + pr_number: 623 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 64 + deletions_count: 37 + }, { + sha: "009803467f4513827abbe4a28d8170a5593ea2c5" + date: "2019-07-10 19:50:42 +0000" + description: "Log when regex does not match" + pr_number: 618 + scopes: ["regex_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 13 + deletions_count: 1 + }, { + sha: "96fadd8decbae32b6ce55063566ba683e27cdc96" + date: "2019-07-10 18:03:27 +0000" + description: "File tests timeout instead of hang if channel is stuck open." 
+ pr_number: 612 + scopes: [] + type: "chore" + breaking_change: false + author: "apjones-proton" + files_count: 1 + insertions_count: 63 + deletions_count: 25 + }, { + sha: "80347525540296db8e9a06140e9359093d9144a6" + date: "2019-07-10 09:01:17 +0000" + description: "Debian 10 verification step" + pr_number: 615 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 30 + deletions_count: 4 + }, { + sha: "734aa228d859357c671c3e61732fdd49b1d9295b" + date: "2019-07-10 07:47:27 +0000" + description: "Fix debian-slim install line in docs" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "6eaa2912a8f2440fc968c87e0f6287da0f752291" + date: "2019-07-09 22:11:45 +0000" + description: "Don't use HTML characters in default value for docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 11 + insertions_count: 30 + deletions_count: 30 + }, { + sha: "2d1c24a8ced93db9496248a52271f5a0d0f6b534" + date: "2019-07-09 22:09:43 +0000" + description: "Restore docker installation instructions" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 84 + deletions_count: 2 + }, { + sha: "82274cca2047432ecc378f8343703dc5d96ab801" + date: "2019-07-09 13:54:16 +0000" + description: "Add section references to each option within the docs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 56 + insertions_count: 453 + deletions_count: 2658 + }, { + sha: "099f062c35c5888a79422d4ee1abca1e200d6a4b" + date: "2019-07-09 01:16:54 +0000" + description: "Fix lock file" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 9 + deletions_count: 18 + }, { + sha: "27fce01ed595969e716bac9c0f688b5813e81e4d" + date: "2019-07-08 17:18:26 +0000" + description: "Restore \"send your first event\" guide" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 96 + deletions_count: 4 + }, { + sha: "b44cc232bc9dd9cee1acac9726b18a02fff0ab7d" + date: "2019-07-08 17:00:46 +0000" + description: "Fix docs/README.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 20 + deletions_count: 248 + }, { + sha: "67a0031a34ba9e94bb772c9fcc0c7d9e2f052507" + date: "2019-07-08 16:50:21 +0000" + description: "Fix log_to_metrics examples" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "7f54fcd82f45adcf2b5fa29cc1e68b7b5b8fd292" + date: "2019-07-08 16:45:45 +0000" + description: "Ensure \"How It Works\" sections are alphabetically sorted" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 33 + insertions_count: 247 + deletions_count: 114 + }, { + sha: "bd54765b1d394bb072b42a2239673dc263f05ddc" + date: "2019-07-08 16:41:36 +0000" + description: "Ensure docs links are relative" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 36 + insertions_count: 618 + deletions_count: 389 + }, { + sha:
"7c5743a9cc2913b337bfbe96f8b0767d49d8ade2" + date: "2019-07-08 12:35:12 +0000" + description: "Add log_to_metric documentation" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 54 + insertions_count: 2030 + deletions_count: 1472 + }, { + sha: "8646a0104998dae7e341fe0a389ebdaaa181e6f1" + date: "2019-07-08 17:13:49 +0000" + description: "Add filename extension option and fix trailing slash" + pr_number: 596 + scopes: ["aws_s3 sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 70 + deletions_count: 18 + }, { + sha: "368b73a22db806b750dff44ed3e7aaac1859d467" + date: "2019-07-08 16:47:14 +0000" + description: "Rename tracing crates" + pr_number: 608 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 12 + insertions_count: 94 + deletions_count: 75 + }, { + sha: "5021837ba934214b6f7ffa3720c7553c1b17179f" + date: "2019-07-08 14:37:49 +0000" + description: "Fix README" + pr_number: 610 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 81 + deletions_count: 0 + }, { + sha: "6a9707d3c419aaa88d3f11a46acbf0e21c0c7bf6" + date: "2019-07-08 13:12:32 +0000" + description: "Initial rate limit subscriber" + pr_number: 494 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 6 + insertions_count: 378 + deletions_count: 0 + }, { + sha: "ac1f714f0ab8bcd2449cf763da66341f141a3b8e" + date: "2019-07-08 18:41:38 +0000" + description: "Convert \"-\" into \"nil\"" + pr_number: 580 + scopes: ["tokenizer transform"] + type: "enhancement" + breaking_change: false + author: "Andy Georges" + files_count: 2 + insertions_count: 24 + deletions_count: 1 + }, { + sha: "971640c239451aea5d217e72d84a0221dc4b7117" + date: "2019-07-07 21:22:29 +0000" + description: "Cleanup documentation headers" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 37 + insertions_count: 726 + deletions_count: 309 + }, { + sha: "6975b45c05db10550e7432a138dfe9144fd6f4b2" + date: "2019-07-07 22:39:47 +0000" + description: "Move dynamically generated docs to ERB templates" + pr_number: 601 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 149 + insertions_count: 9434 + deletions_count: 7109 + }, { + sha: "1d98e789c8db3cee3f45303ff73b102290ddbb97" + date: "2019-07-07 20:46:36 +0000" + description: "Add Ruby and Bundler 2 to development requirements" + pr_number: 600 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 4 + deletions_count: 0 + }, { + sha: "7e3cb94bacdbf26a7c0487f57696a46e420d8d2f" + date: "2019-07-04 18:51:05 +0000" + description: "Fix gauge misspelling" + pr_number: 594 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 9 + deletions_count: 9 + }, { + sha: "25ece4711cf918f321fc00e7d91efc5f582a69ef" + date: "2019-07-04 18:47:15 +0000" + description: "Fix include exclude" + pr_number: 593 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 20 + deletions_count: 20 + }, { + sha: "8fac6fe083e4fdfee270cbf1be18ed7cd4eee9e9" + date: "2019-07-04 18:37:54 +0000" + description: "Add env var example to add_fields documentation" + pr_number: null + scopes: [] + type: 
"docs" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 18 + deletions_count: 6 + }, { + sha: "894c9df97e881483ee48b4319813c9132344e46c" + date: "2019-07-04 17:15:45 +0000" + description: "Fix documentation array syntax" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 45 + insertions_count: 136 + deletions_count: 136 + }, { + sha: "e05314708498fa5d97054ff15510478f8aa66893" + date: "2019-07-04 17:14:01 +0000" + description: "Resolve documentation typos and formatting issues" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 61 + insertions_count: 875 + deletions_count: 290 + }, { + sha: "b5c1cd7bad03ec37166d924b29dea17acc22b85a" + date: "2019-07-04 16:47:19 +0000" + description: "Add check for pending documentation changes" + pr_number: 592 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 83 + insertions_count: 750 + deletions_count: 1132 + }, { + sha: "fbbf5d1d6a8dbd03208faa4fc5b3af577a97ac91" + date: "2019-07-04 12:47:37 +0000" + description: "Fix configuration documentation headings" + pr_number: 591 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 27 + deletions_count: 27 + }, { + sha: "e8682cc307ce3a74b719e809a388a20860aee658" + date: "2019-07-04 12:42:49 +0000" + description: "Cleanup documentation conventions" + pr_number: 590 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 132 + deletions_count: 321 + }, { + sha: "5f3a00216fecf17f44f3a5a6be032fe9e362bb3d" + date: "2019-07-04 11:05:10 +0000" + description: "Reduce test threads from 8 to 4" + pr_number: 587 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "92277fbfae7a1873a35ea75a725e9b71e963a0d5" + date: "2019-07-03 22:48:31 +0000" + description: "Rename tokio-trace to tracing" + pr_number: 578 + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 30 + insertions_count: 197 + deletions_count: 194 + }, { + sha: "4074d8430a183d3eaccca311044c3ad733785f57" + date: "2019-07-03 18:13:59 +0000" + description: "Add make signoff command in pull request template" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "35afcc8ee85d2d826bf4feb348bb1b5c5e15b781" + date: "2019-07-03 15:43:04 +0000" + description: "Update Makefile and DEVELOPING.md" + pr_number: 570 + scopes: [] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 82 + deletions_count: 65 + }, { + sha: "b57af065e88ff915ef9b8450114394063615a5f5" + date: "2019-07-03 15:28:24 +0000" + description: "Use MiB not mib in docs" + pr_number: 577 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 10 + deletions_count: 10 + }, { + sha: "4fce85e98dac0d15edddc25adebe0db13b4c072f" + date: "2019-07-03 15:06:06 +0000" + description: "Link to License" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "8de9ad3a22f0c4789a760b4f0e57a84163edddec" + date: "2019-07-03 14:54:46 +0000" 
+ description: "Add DCO and update CONTRIBUTING.md" + pr_number: 571 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 117 + deletions_count: 39 + }, { + sha: "b6316953a480a5ee161c6a61b33b4d33de23434d" + date: "2019-07-03 09:31:16 +0000" + description: "Fix tests" + pr_number: null + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "16da8e55e7408473a15adf045de6bf9ebf6517af" + date: "2019-07-03 09:22:28 +0000" + description: "Use floats for metrics values" + pr_number: 553 + scopes: ["metric data model"] + type: "enhancement" + breaking_change: false + author: "Alexey Suslov" + files_count: 4 + insertions_count: 19 + deletions_count: 19 + }, { + sha: "d8eadb08f469e7e411138ed9ff9e318bd4f9954c" + date: "2019-07-02 07:04:39 +0000" + description: "output multiple metrics from a single log" + pr_number: null + scopes: ["log_to_metric transform"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 57 + deletions_count: 7 + }, { + sha: "fe7f2b503443199a65a79dad129ed89ace3e287a" + date: "2019-06-27 17:07:11 +0000" + description: "adjust transform trait for multiple output events" + pr_number: null + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 14 + deletions_count: 5 + }, { + sha: "5b58adb048b5740e5420255141f33a58e280852f" + date: "2019-07-02 16:40:04 +0000" + description: "Remove makefile from list of languages" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "7c4b6488841b86c64ce41aadf7c1552a87b27d0a" + date: "2019-07-02 15:33:19 +0000" + description: "Use printf in the install.sh script" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 15 + deletions_count: 15 + }, { + sha: "7538d6eaae49666e4fc320a0f44425a69f789c38" + date: "2019-07-02 12:07:53 +0000" + description: "Bump check-stable box size" + pr_number: 555 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "a703de875fa7181c78d080509bbfed427a63fd11" + date: "2019-07-02 08:46:47 +0000" + description: "make sure Cargo.lock gets updated on version bump" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 8 + deletions_count: 6 + }, { + sha: "15d6b26409761aa5eb15c70082fc02f83d1e949c" + date: "2019-07-02 02:04:50 +0000" + description: "Ensure new bumped version uses -dev" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "e53c86c0895ef0dfa48dbe8a4c572ea1c9d87a84" + date: "2019-07-02 02:03:58 +0000" + description: "Start v0.4.0-dev" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 14 + deletions_count: 14 + }, { + sha: "27f79e2f8d5d99685bae8549d697355b77a0ad12" + date: "2019-09-13 00:44:30 +0000" + description: "add all parsed syslog fields to event" + pr_number: 836 + scopes: ["syslog source"] + type: "feat" + 
breaking_change: false + author: "Kirill Taran" + files_count: 4 + insertions_count: 322 + deletions_count: 123 + }, { + sha: "b9a7812e2e4cd7c7a7c87d77a84a3488b82b8f64" + date: "2019-09-13 09:50:18 +0000" + description: "log a single warning when ignoring small files" + pr_number: 863 + scopes: ["file source"] + type: "enhancement" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 33 + deletions_count: 9 + }, { + sha: "65c189a6200f670c7faf1f6137e1e6ec77193bc5" + date: "2019-09-13 09:54:05 +0000" + description: "add logging when we can't tail file" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 11 + deletions_count: 8 + }, { + sha: "95f7d345687737ba61ded2202196f4a40e3f8b85" + date: "2019-09-13 15:05:10 +0000" + description: "Support AWS authentication" + pr_number: 864 + scopes: ["elasticsearch sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 270 + deletions_count: 43 + }, { + sha: "7cb7cf3efc5f64d926458fcacc8228ee543e203d" + date: "2019-09-14 10:47:10 +0000" + description: "add check_urls make argument" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 8 + insertions_count: 50 + deletions_count: 50 + }, { + sha: "a4f963f3f7362c34335880659ea6d4a8c49d412f" + date: "2019-09-14 10:50:03 +0000" + description: "create component md file if it does not yet exist, closes #849" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 12 + deletions_count: 6 + }, { + sha: "35247a654181d1b3ace0309508707c6300b03561" + date: "2019-09-16 17:08:58 +0000" + description: "add split transform" + pr_number: 850 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Matthias Endler" + files_count: 17 + insertions_count: 964 + deletions_count: 7 + }, { + sha: "662d74cce6fe8dbbbe4ff00e4cf61ef2d484676a" + date: "2019-09-16 11:12:47 +0000" + description: "ignore .tmp files" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "aa74f1ec31764278a4dc53e9abdc53f52a742a89" + date: "2019-09-16 10:51:18 +0000" + description: "Error types" + pr_number: 811 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 51 + insertions_count: 627 + deletions_count: 302 + }, { + sha: "1a90ce7182388de44bc5079cc1168842b5490168" + date: "2019-09-16 13:52:51 +0000" + description: "Move .metadata.toml to /.meta/*" + pr_number: 872 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 53 + insertions_count: 1873 + deletions_count: 2035 + }, { + sha: "dadb904fda1681eec6d9063406fa2e43cfc7ba64" + date: "2019-09-16 14:49:13 +0000" + description: "switch to more modern kafka image" + pr_number: 875 + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 10 + deletions_count: 4 + }, { + sha: "503bbc0494eca9b2d62267b4a29adc3c2ce27ff4" + date: "2019-09-16 22:18:33 +0000" + description: "Fix some typos in file-source crate" + pr_number: 871 + scopes: [] + type: "chore" + breaking_change: false + author: "Matthias Endler" + files_count: 1 + insertions_count: 6 + deletions_count: 6 + }, { + sha: "a97f2984778c4ffdf0412b16e27e43e9a32b2884" + date: "2019-09-17 
11:08:09 +0000" + description: "Fix String error return in elasticsearch config parser" + pr_number: 883 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 23 + deletions_count: 11 + }, { + sha: "f18796a35b9d61d3747386a0290c5ae50bc57310" + date: "2019-09-17 18:06:34 +0000" + description: "Simpler, less noisy component options" + pr_number: 888 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 88 + insertions_count: 199 + deletions_count: 2042 + }, { + sha: "ec73082da655d5e17c7023fef3b5c1893a4d7bf4" + date: "2019-09-18 10:24:07 +0000" + description: "Introduce crate-level `Result` type" + pr_number: 884 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 41 + insertions_count: 88 + deletions_count: 94 + }, { + sha: "269c6054f7d74c11cf5a933f79f8966befa2c579" + date: "2019-09-19 12:15:26 +0000" + description: "add commit types for semantic prs" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "f9bf4bc05a1afd6d3861c96ba107e02120d447fa" + date: "2019-09-20 13:32:29 +0000" + description: "Add relese-meta make target for preparing release metadata" + pr_number: 898 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 355 + deletions_count: 7 + }, { + sha: "6caa0f9fcc72c9becf2588b0839e2849c1d9b28e" + date: "2019-09-20 14:45:09 +0000" + description: "automatically create missing component templates" + pr_number: 899 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 81 + insertions_count: 342 + deletions_count: 297 + }, { + sha: "2e5c0e0998d14f4e95397c92ffd92f85b54ff682" + date: "2019-09-20 16:35:32 +0000" + description: "update checker docker image too include activesupport" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 13 + deletions_count: 10 + }, { + sha: "8c48932bb9cfd7267bf72bf260684d5fa93e8150" + date: "2019-09-21 18:42:48 +0000" + description: "Simplify link system and resolution" + pr_number: 901 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 138 + insertions_count: 2700 + deletions_count: 2694 + }, { + sha: "c38f85c570194a5eb3e689c73550305e02a5bf1d" + date: "2019-09-22 12:24:11 +0000" + description: "Generate CHANGELOG.md" + pr_number: 903 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 20 + insertions_count: 699 + deletions_count: 82 + }, { + sha: "2776d7556176299e9090f319b6eca4bfcaa03b79" + date: "2019-09-22 12:29:08 +0000" + description: "simplify readme installation links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 8 + deletions_count: 27 + }, { + sha: "1dc0f93b0771cda8b075f0501151ab7d62247e29" + date: "2019-09-22 12:39:38 +0000" + description: "fix archive name for nightly builds" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "c2792e1c543e9a67782b5dd43d3c9ec6f0ac82db" + date: "2019-09-22 15:31:27 +0000" + description: "dont upload version triple archives to s3" + pr_number: null + scopes: ["operations"] + 
type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 1 + deletions_count: 7 + }, { + sha: "dc2582b31eb1a7722c50d6eb7a6799ae04ec7f66" + date: "2019-09-22 15:44:19 +0000" + description: "use consistent archive names across all release channels" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 33 + deletions_count: 27 + }, { + sha: "2921e9a88e07e5a84294fdd36300c0cbf8bb294d" + date: "2019-09-22 15:53:46 +0000" + description: "cleanup unused variables in release-s3.sh script" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "a83a75003b41a881f87b7f2a053a9c43e040e1bc" + date: "2019-09-23 16:50:14 +0000" + description: "rename config tag" + pr_number: 902 + scopes: ["add_fields transform"] + type: "fix" + breaking_change: false + author: "Alexey Suslov" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "c830b956409b5f64d83c2ddd5056a5deaec1e609" + date: "2019-09-23 18:39:02 +0000" + description: "default config path \"/etc/vector/vector.toml\"" + pr_number: 900 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 7 + deletions_count: 1 + }, { + sha: "2f187234ee024398997a6c4defac0ad38a234ac3" + date: "2019-09-23 15:39:08 +0000" + description: "Add release-commit make target" + pr_number: 911 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 31 + insertions_count: 387 + deletions_count: 97 + }, { + sha: "f942dfaca06a3de66ca593d99b5f04ccd4638e95" + date: "2019-09-23 15:43:15 +0000" + description: "Remove $VERSION from package-deb" + pr_number: 910 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8b2a11ee9ba0c3204deefa3d0435120873808089" + date: "2019-09-23 23:44:23 +0000" + description: "Use OpenSSL instead of LibreSSL for x86_64-unknown-linux-musl" + pr_number: 904 + scopes: ["operations"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 6 + deletions_count: 34 + }, { + sha: "d35ddfff2edbc4f776a75cc420f834a6f4d2aec4" + date: "2019-09-23 17:04:49 +0000" + description: "Remove ARMv7 support -- for now" + pr_number: 913 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 0 + deletions_count: 274 + }, { + sha: "88769c9049da01560866a17f806403df46ca43fe" + date: "2019-09-23 21:15:14 +0000" + description: "Add libssl-dev to musl builder" + pr_number: 917 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 63 + deletions_count: 63 + }, { + sha: "b0089e2509a5dc05155f4a11ed99439055b43eea" + date: "2019-09-23 22:35:16 +0000" + description: "Remove $VERSION when building archives" + pr_number: 918 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 2 + deletions_count: 8 + }, { + sha: "027836100a44874fc1989296f49777203f0a722a" + date: "2019-09-24 16:21:20 +0000" + description: "Use vendored OpenSSL for x86_64-unknown-linux-musl CI build" + pr_number: 919 + scopes: ["operations"] + type: "chore" + 
breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "4e256e2d3e9bd6aa91484f093b5b7fae894b9bf5" + date: "2019-09-24 09:59:45 +0000" + description: "add types to semantic.yml" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 20 + deletions_count: 6 + }, { + sha: "230f3250cb1e109446ef017f82794466e3e070c2" + date: "2019-09-24 10:05:23 +0000" + description: "verify builds by default" + pr_number: 914 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 32 + deletions_count: 4 + }, { + sha: "f6b0739ebcabce1c768a2e3a97f2e6ee30119e4c" + date: "2019-09-24 10:11:23 +0000" + description: "use enhancement not improvement" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 11 + deletions_count: 3 + }, { + sha: "3450767465c7a58bb46631a8b922bb33d0b585c2" + date: "2019-09-24 11:03:56 +0000" + description: "Prepare v0.4.0 release" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 5 + insertions_count: 671 + deletions_count: 2 + }, { + sha: "c512e286e6a864911683bde5cdec4744f154966d" + date: "2019-09-24 10:39:24 +0000" + description: "fix s3 compression and endpoint options" + pr_number: 921 + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 7 + insertions_count: 73 + deletions_count: 69 + }, { + sha: "ced248773ab9a04d862a22dd4b80dfde5c9e8de3" + date: "2019-09-24 12:21:29 +0000" + description: "update release-github to include release notes" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 106 + deletions_count: 0 + }, { + sha: "3ea589a0d8ddc58d9b4caa167b0ef84fab99c84e" + date: "2019-09-24 12:24:53 +0000" + description: "use common setup.rb script for boiler plate setup" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 7 + deletions_count: 116 + }] + } + "0.9.1": { + date: "2020-04-29" + codename: "" + whats_next: [] + commits: [{ + sha: "4d76e751febd778887a7432263f77369895cd093" + date: "2020-04-22 14:37:44 +0000" + description: "Support millisecond and nanosecond timestamps" + pr_number: 2382 + scopes: ["splunk_hec source", "splunk_hec sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 71 + deletions_count: 4 + }, { + sha: "b1c8421357502e1eca123e98787e7071109620f4" + date: "2020-04-22 15:13:54 +0000" + description: "Handle missing source timestamp" + pr_number: 2387 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 30 + deletions_count: 4 + }, { + sha: "fc2c77b643e02b86a99cff4c914df06060a49d52" + date: "2020-04-23 15:27:26 +0000" + description: "`enoding.only_fields` should properly handle parent keys" + pr_number: 2413 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 110 + deletions_count: 49 + }, { + sha: "48a6d142e9a8ff441d3379cecba7272152b74a72" + date: "2020-04-27 13:31:51 +0000" + description: "add text encoding" + pr_number: 2468 + scopes: ["humio_logs sink"] + type: "enhancement" + 
breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 28 + deletions_count: 74 + }, { + sha: "47bf9f74903162a02f40cd7113c37cfec6bb4303" + date: "2020-04-27 20:05:10 +0000" + description: "Use header auth" + pr_number: 2443 + scopes: ["datadog_metrics sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 7 + deletions_count: 3 + }, { + sha: "332e9711c7a7c414a0ee83257d172c9b79f1244b" + date: "2020-04-28 19:08:02 +0000" + description: "Add indexed fields in `text` encoding" + pr_number: 2448 + scopes: ["splunk_hec sink"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 67 + deletions_count: 22 + }, { + sha: "fafdc789b26e23075aa6afc1b12622b001f0f5c4" + date: "2020-04-28 14:04:02 +0000" + description: "Treat empty namespaces as not set" + pr_number: 2479 + scopes: ["aws_ec2_metadata transform"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 48 + deletions_count: 8 + }, { + sha: "4e55cbb5b4879bcd971787089873cd62f7ddc451" + date: "2020-04-29 11:30:48 +0000" + description: "Fix handling of standard AWS regions" + pr_number: 2489 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 98 + deletions_count: 31 + }, { + sha: "a8fba10bc739fb5f9b54264bab937700e161f5d5" + date: "2020-04-29 13:42:40 +0000" + description: "Fetch system ca certs via schannel on windows" + pr_number: 2444 + scopes: ["networking"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 163 + deletions_count: 11 + }, { + sha: "9082b1808115bac6dd64f310126fb57754ce315e" + date: "2020-04-29 15:29:34 +0000" + description: "Move healtcheck consumer creation to boxed future" + pr_number: 2499 + scopes: ["pulsar sink"] + type: "fix" + breaking_change: false + author: "Evan Cameron" + files_count: 1 + insertions_count: 13 + deletions_count: 17 + }, { + sha: "b2bc1b77ac53b412162a845293487586f66b3007" + date: "2020-04-29 22:26:19 +0000" + description: "Add `instance-type` field" + pr_number: 2500 + scopes: ["aws_ec2_metadata transform"] + type: "enhancement" + breaking_change: false + author: "Slawomir Skowron" + files_count: 1 + insertions_count: 16 + deletions_count: 0 + }, { + sha: "af544f361cc03e31207fcdd5e57104d051fde136" + date: "2020-04-30 10:51:02 +0000" + description: "Use specific error for x509 from system ca" + pr_number: 2507 + scopes: ["security"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 10 + deletions_count: 5 + }, { + sha: "a0d5cf5469045d066bed5ed950187ff6a7612dc4" + date: "2020-04-30 12:55:08 +0000" + description: "Shutdown topology pieces before building new ones" + pr_number: 2449 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 349 + deletions_count: 114 + }, { + sha: "319a75ddc20060a8aecb2d0e990d3e52b19cc0e5" + date: "2020-04-30 13:28:53 +0000" + description: "Enforce age requirements" + pr_number: 2437 + scopes: ["aws_cloudwatch_logs sink"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 195 + deletions_count: 34 + }, { + sha: "fcd5c1893713e08d1ee0f51cdca5aa16686af148" + date: "2020-04-30 11:17:14 +0000" + description: "Check code on Windows" + pr_number: 2506 + scopes: ["operations"] + type: 
"chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 640 + deletions_count: 605 + }] + } + "0.5.0": { + date: "2019-10-10" + codename: "Platform Mingling" + whats_next: [] + commits: [{ + sha: "89c303748f100c881e6e1cb921e3d64870d89ca3" + date: "2019-09-24 12:28:01 +0000" + description: "Update releaser Dockerfile to include Ruby and the necessary gems" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 10 + deletions_count: 2 + }, { + sha: "5f251260ede2331a19e20d1319e9484bebd6f890" + date: "2019-09-24 14:58:36 +0000" + description: "Add git to musl builder" + pr_number: 923 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "eab5a1a6c20ea7ec30b2e7f17c622d61e5f74613" + date: "2019-09-24 16:28:40 +0000" + description: "Fix github release notes" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 9 + insertions_count: 51 + deletions_count: 21 + }, { + sha: "b88e0563acf439f1503c0380f2612fdf398ff134" + date: "2019-09-24 16:46:51 +0000" + description: "Update release download URLs" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 22 + deletions_count: 12 + }, { + sha: "b2e4ccc78d8e8df3507abf3a3e2a9c44b3a37e7e" + date: "2019-09-24 15:03:33 +0000" + description: "Show information about why a retry needs to happen" + pr_number: 835 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 36 + deletions_count: 23 + }, { + sha: "b25a22e71417df6bb3889f6ff1208cbf6f73232f" + date: "2019-09-24 17:12:30 +0000" + description: "Make encoding non-optional" + pr_number: 894 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 23 + insertions_count: 542 + deletions_count: 707 + }, { + sha: "222fe08358566f677e342e9553ce5421597cdfaa" + date: "2019-09-24 17:58:04 +0000" + description: "add version to readme" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 10 + insertions_count: 146 + deletions_count: 82 + }, { + sha: "7c10d204cd0cf821a38f3ae6f903f346d94a1d87" + date: "2019-09-24 18:08:03 +0000" + description: "Update installation readme link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 4 + deletions_count: 3 + }, { + sha: "f8ba06b75daf3d7d3be9c47d9762b8ec8dae7c55" + date: "2019-09-24 20:39:03 +0000" + description: "Recommend a new version based on pending commits" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 81 + deletions_count: 46 + }, { + sha: "597f989dc0900c08b099f62107ce53a5508e9933" + date: "2019-09-24 23:02:18 +0000" + description: "Use proper category in changelog for new components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 19 + insertions_count: 461 + deletions_count: 25 + }, { + sha: "55582e52e1e8856b75702ffce6b56218ac82ddaf" + date: "2019-09-25 09:57:05 +0000" + description: "Initial `statsd` implementation" + pr_number: 821 + scopes: ["new sink"] + type: "feat" + 
breaking_change: false + author: "Alexey Suslov" + files_count: 16 + insertions_count: 776 + deletions_count: 1 + }, { + sha: "eff3bf23a9dbdbf1c01b2744ad0a489542533841" + date: "2019-09-25 09:54:36 +0000" + description: "Fix incorrect description of kafka option" + pr_number: 926 + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 4 + insertions_count: 14 + deletions_count: 18 + }, { + sha: "dd74e64f1d00c1032a7a470f40f4b7aea57b1d86" + date: "2019-09-26 00:08:25 +0000" + description: "Add OpenSSL to x86_64-unknown-linux-musl buil…" + pr_number: 927 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 53 + deletions_count: 4 + }, { + sha: "630d841a4dce90df195abfab53722f61b8b192a2" + date: "2019-09-25 16:01:03 +0000" + description: "Add support for TLS (SSL)" + pr_number: 912 + scopes: ["kafka sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 14 + insertions_count: 409 + deletions_count: 10 + }, { + sha: "43d04fc4b5a9855c936b5c63e470c3b78206b227" + date: "2019-09-25 18:34:20 +0000" + description: "Use PKCS#12 keys instead of JKS" + pr_number: 934 + scopes: ["kafka sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 64 + deletions_count: 23 + }, { + sha: "5fa0161e537f33010e8116cb5c6782c721701c29" + date: "2019-09-25 20:45:45 +0000" + description: "Fix nightly builds link" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "880d6401ac9705760e140dfb2537646078fb3eb0" + date: "2019-09-26 08:52:41 +0000" + description: "Create SECURITY.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 7 + deletions_count: 0 + }, { + sha: "af77005bc7cbf908c271a826e4cd5caee7b45072" + date: "2019-09-26 14:53:56 +0000" + description: "Fix install script path export" + pr_number: 891 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Cédric Da Fonseca" + files_count: 1 + insertions_count: 9 + deletions_count: 2 + }, { + sha: "6353e49126dc5f575194783870ab06f1e9e3354a" + date: "2019-09-26 09:02:45 +0000" + description: "Simplify changelog TOC" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 3 + insertions_count: 15 + deletions_count: 50 + }, { + sha: "ec7e488213fc8e9c04798174a00318aa3d9b84b8" + date: "2019-09-26 09:57:05 +0000" + description: "Update to rust 1.38.0" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "dda0309ced633cfd0a7b810c19733e02e8f09fbe" + date: "2019-09-26 10:15:13 +0000" + description: "Fix fmt errors for 1.38.0" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "da8802836f5b9085c776eeb80d12d2c9fa1ab266" + date: "2019-09-26 17:30:51 +0000" + description: "Improve installation docs" + pr_number: 942 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 23 + insertions_count: 419 + deletions_count: 200 + }, { + sha: "05395b070b3eb3cf4f32f61423aae99ad26dc773" + date: "2019-09-26 17:40:46 +0000" + description: "Link to 
README.md file in SUMMARY.md" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 4 + insertions_count: 59 + deletions_count: 54 + }, { + sha: "9f1c2b78847d0f0122ea1f8e6c9e2f93db0053f8" + date: "2019-09-26 17:57:02 +0000" + description: "Fix broken docs links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 27 + insertions_count: 63 + deletions_count: 241 + }, { + sha: "0f8aaecea209105a58693f0360c43d08fd594263" + date: "2019-09-26 18:00:13 +0000" + description: "Ensure .rpm packages are built in nightly builds" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 21 + deletions_count: 5 + }, { + sha: "daa7ce711f6b3d50a4e1a75eda15ba0d8bd95973" + date: "2019-09-26 18:04:34 +0000" + description: "fix broken tabs on yum and apt pages" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "87737e23261437d7f1bdb0bba0662cfd3884098e" + date: "2019-09-26 18:08:59 +0000" + description: "fix download links for deb and rpm packages" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 6 + insertions_count: 218 + deletions_count: 11 + }, { + sha: "296411c14eceb799c26ac478aee9f6d302bea515" + date: "2019-09-26 18:18:56 +0000" + description: "Update SECURITY.md with better info" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 25 + deletions_count: 3 + }, { + sha: "61520b2cfabb8c3345dcf896df620906ceb55d4c" + date: "2019-09-26 23:02:02 +0000" + description: "Docker images use binaries" + pr_number: 940 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 29 + deletions_count: 80 + }, { + sha: "f6e1050d8a7fdd41a844ae9ba496ad1cd2bb10ce" + date: "2019-09-27 11:28:24 +0000" + description: "Remove setting VERSION for `make generate`" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "089bb5a2a4fbc8fa1522781b0982a9a9ca58e479" + date: "2019-09-27 16:07:58 +0000" + description: "Add `fix` as a valid PR type" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 0 + }, { + sha: "4c9917754edd71a4ef53b9778d4540e3736d0abb" + date: "2019-09-28 19:08:07 +0000" + description: "Clean up debian user creation and unit file" + pr_number: 947 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "7653c6bbd61f3859d651d6cab21e43d5612cf6c7" + date: "2019-09-30 18:08:46 +0000" + description: "Update tokio versions" + pr_number: 949 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 70 + deletions_count: 34 + }, { + sha: "07339e111633e783c71484c83f8f0193a9167716" + date: "2019-10-01 18:20:10 +0000" + description: "Use stable Rust 1.38.0 and update Linux headers for x86_6…" + pr_number: 945 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + 
insertions_count: 4 + deletions_count: 7 + }, { + sha: "00818013b6d8a9acfa578ce80a2ef5fa5cf9505d" + date: "2019-10-02 00:35:52 +0000" + description: "Tarball URL address for the Linux installation script" + pr_number: 957 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Fernando Schuindt" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "8f185695e084b1be5da753e9fca2c831cace3bac" + date: "2019-10-02 11:53:44 +0000" + description: "Add support for TLS options" + pr_number: 953 + scopes: ["elasticsearch sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 14 + insertions_count: 575 + deletions_count: 213 + }, { + sha: "2d5736e8f1e57ffe573c07cfcdc77e0c67dc84e9" + date: "2019-10-02 13:59:12 +0000" + description: "Ensure released s3 files are public-read" + pr_number: 959 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 12 + deletions_count: 4 + }, { + sha: "e1608701e298baf2d452689ca9fec9f1f0fb4c02" + date: "2019-10-02 17:19:40 +0000" + description: "Sync and verify install.sh" + pr_number: 958 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 428 + deletions_count: 140 + }, { + sha: "f20d68a6ba153df599e525f15e18baebe624585f" + date: "2019-10-02 17:42:08 +0000" + description: "Remove APT, YUM, and PackageCloud" + pr_number: 961 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 21 + insertions_count: 61 + deletions_count: 265 + }, { + sha: "ce2174996a583ae27b8c04b998f59abc47f5634a" + date: "2019-10-02 22:35:35 +0000" + description: "Add SSE and public-read ACL to install.sh" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8dae324901f7fb4913ca68e723d6aeea814e76f3" + date: "2019-10-03 11:09:26 +0000" + description: "Verify installation script on mac" + pr_number: 965 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 15 + deletions_count: 0 + }, { + sha: "eebcbd4c1fa1296a0bfe152a2141c253cbb76d88" + date: "2019-10-03 11:57:52 +0000" + description: "Verify that sh.vector.dev works" + pr_number: 964 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 22 + deletions_count: 1 + }, { + sha: "eedbb2c650f75406b86a9da9f1d7de48550dcf7e" + date: "2019-10-03 12:11:17 +0000" + description: "Create missing .md file for new components" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 7 + deletions_count: 0 + }, { + sha: "5c5ad89d74a7ec0069e0b41ba8adccc20b5ecf55" + date: "2019-10-04 11:18:38 +0000" + description: "Verify and check Homebrew install" + pr_number: 969 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 26 + deletions_count: 7 + }, { + sha: "d5974dc4198abd22bf6b920fc380a087cc150137" + date: "2019-10-04 23:30:25 +0000" + description: "Add support for basic auth" + pr_number: 937 + scopes: ["clickhouse sink"] + type: "feat" + breaking_change: false + author: "albert" + files_count: 5 + insertions_count: 100 + deletions_count: 4 + }, { + sha: 
"c216022f600dbdf7aec8f8bb2fd7e9320584ed16" + date: "2019-10-05 13:10:20 +0000" + description: "Use sudo when checking internet install" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "7449992216e3c8812f2ed24d4ddda11c799e50e9" + date: "2019-10-07 16:42:28 +0000" + description: "Update cloudwatch metrics docs" + pr_number: 968 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexey Suslov" + files_count: 8 + insertions_count: 99 + deletions_count: 6 + }, { + sha: "2e6516f844247a18a7885dfeafc5f4d118687845" + date: "2019-10-07 20:22:14 +0000" + description: "Properly verify that the Vector Systemd service started" + pr_number: 982 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 25 + deletions_count: 19 + }, { + sha: "8c2ef3e5412159289be79a5521ffd43c65be812b" + date: "2019-10-07 22:33:01 +0000" + description: "Dont auto-update when testing Homebrew install" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "8a6705aefbf8e00c99107ba5038cb1022d85cd7e" + date: "2019-10-08 00:31:46 +0000" + description: "Fix Docker builds" + pr_number: 985 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 20 + insertions_count: 430 + deletions_count: 169 + }, { + sha: "8cc531da55aa1d948e785af0dec1ba74bef165e0" + date: "2019-10-08 00:37:07 +0000" + description: "Fix failing verify-install-on-internet check" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "76e396aeb5d89f227b25467fdb86293e5e5c1a95" + date: "2019-10-08 00:38:40 +0000" + description: "Fix vector docker image name reference" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "ddc27bb670e86713c03554ffe081dd1e873a7de9" + date: "2019-10-08 17:18:19 +0000" + description: "Initial `docker` source implementation" + pr_number: 787 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 23 + insertions_count: 1537 + deletions_count: 9 + }, { + sha: "74b654606b39a7554c53c07b585d0cd9be3b76f7" + date: "2019-10-08 11:33:32 +0000" + description: "Unify the different TLS options" + pr_number: 972 + scopes: ["security"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 25 + insertions_count: 905 + deletions_count: 540 + }, { + sha: "476fb7e436f1b285ccff3dc52e21a8b1f36ab458" + date: "2019-10-08 16:08:22 +0000" + description: "Default data_dir to /var/lib/vector" + pr_number: 995 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 8 + deletions_count: 1 + }, { + sha: "b541bb1a4097d22f3efa9d74ccaf28cabcbe6466" + date: "2019-10-08 18:21:36 +0000" + description: "Add rate limited debug messages" + pr_number: 971 + scopes: ["observability"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 33 + insertions_count: 422 + deletions_count: 630 + }, { + sha: "389a65072cea2b7d3bafe70a52597d83925251e6" + date: "2019-10-08 18:33:39 
+0000" + description: "Fix release script bug" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "3a86fdae3f5d72d001ba16b9683514e571a7c105" + date: "2019-10-08 18:48:29 +0000" + description: "Prepare v0.5.0 release" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 10 + insertions_count: 55 + deletions_count: 204 + }, { + sha: "df6018be6c1c964692d3ea071f4d95fb21f1cb14" + date: "2019-10-08 19:06:31 +0000" + description: "Add 0.5.0 release metadata" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 1 + insertions_count: 61 + deletions_count: 0 + }, { + sha: "5a4d50b022db116a0155efafb6aaaa34e4882600" + date: "2019-10-08 20:27:20 +0000" + description: "Remove unsupported bash flags" + pr_number: null + scopes: [] + type: "chore" + breaking_change: false + author: "Ben Johnson" + files_count: 2 + insertions_count: 0 + deletions_count: 6 + }, { + sha: "86d1d01bed23aa1496dcdab9c627d90c6c07e294" + date: "2019-10-09 11:36:55 +0000" + description: "Add sudo when installing via dpkg or rpm" + pr_number: 999 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 10 + deletions_count: 2 + }, { + sha: "a70603d11764ca49c4aa62bf3e50f7cf712c0018" + date: "2019-10-09 11:38:01 +0000" + description: "Add git to musl build image" + pr_number: 997 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "2cb0e44c69c776679dc19d41af8aecee42169e75" + date: "2019-10-09 15:52:26 +0000" + description: "Fix centos verifier" + pr_number: 1001 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 15 + insertions_count: 37 + deletions_count: 107 + }] + } + "0.9.2": { + date: "2020-06-02" + codename: "" + whats_next: [] + commits: [{ + sha: "5b821916cbd6583fa7375cc9daba6bfe2031f4f8" + date: "2020-05-27 11:22:47 +0000" + description: "ack and don't stop on request errors" + pr_number: 2666 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 56 + deletions_count: 17 + }] + } + "0.7.2": { + date: "2020-01-31" + codename: "" + whats_next: [] + commits: [{ + sha: "eee9300712181a6be96f894952bca55290f68947" + date: "2020-01-24 09:40:28 +0000" + description: "Fix typo" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "e1f71a59ab4d3839297938e5937bb2c60722844f" + date: "2020-01-24 05:10:35 +0000" + description: "Wrap failing test in feature flag" + pr_number: 1595 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 3 + insertions_count: 2 + deletions_count: 3 + }, { + sha: "dada48a66c814c0fd3e031a709e07db4b81b3e71" + date: "2020-01-24 13:17:59 +0000" + description: "Ensure that own logs are always ignored" + pr_number: 1525 + scopes: ["docker source"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 53 + deletions_count: 59 + }, { + sha: "eae248d09318af9f14319ee42c5a81a800dcce44" + date: "2020-01-24 15:55:58 +0000" + description: "Fix `release-github` CI job" 
+ pr_number: 1600 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 4 + deletions_count: 2 + }, { + sha: "fca31a736b4056f2150b6e4abfcea1d87b5a97db" + date: "2020-01-24 21:32:15 +0000" + description: "Fix `release-homebrew` CI job" + pr_number: 1601 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 5 + deletions_count: 6 + }, { + sha: "bf1386cc151b130efde27c60122baa98b69cab61" + date: "2020-01-25 15:21:53 +0000" + description: "Fix socket address documentation" + pr_number: 1607 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 31 + insertions_count: 76 + deletions_count: 65 + }, { + sha: "201b9584d5c48d6bd687fa98749d9ab7ec46376d" + date: "2020-01-31 18:30:48 +0000" + description: "Fix group creation in the RPM package" + pr_number: 1654 + scopes: ["platforms"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "3245ad9c62ed4dcebb8482d8a9ac0fa045e44a1c" + date: "2020-01-31 21:56:09 +0000" + description: "Bump version in `Cargo.toml` and `Cargo.lock`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "87f0f732e988c936093477ce6fe630b4b830b61a" + date: "2020-01-29 11:08:27 +0000" + description: "Disable Kubernetes tests in CI temporarily" + pr_number: 1629 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 35 + deletions_count: 39 + }, { + sha: "c1b8f5ee96de2503cd10d2967d33131d99189ac1" + date: "2020-01-31 22:16:51 +0000" + description: "Fix creation of the tag in `make release`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "25d5fa64a565ac8ab26b5a10ddce9f24df1d5a62" + date: "2020-01-31 22:24:27 +0000" + description: "Use all tags in `make release`" + pr_number: null + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1020b033b2bb14ed2e135835abd4533ccd52f7ce" + date: "2020-01-27 13:13:47 +0000" + description: "Improve 0.7.1 release notes" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "binarylogic" + files_count: 5 + insertions_count: 73 + deletions_count: 73 + }] + } + "0.9.0": { + date: "2020-04-17" + codename: "The Homestretch" + whats_next: [{ + title: "Kubernetes!" + description: "We're doing this integration right. We've been alpha testing with a number of large clusters and we've homed in on a very high-quality approach that reflects the UX Vector is going after. You can learn more in our [Kubernetes Integration RFC][urls.pr_2222]. Feedback is welcome!" + }, { + title: "WASM Driven Plugin Architecture" + description: "This release included an overhaul of our [`lua` transform][docs.transforms.lua], which represents a big step towards making Vector more flexible. Lua is an excellent language when you need some quick scripting capabilities. But what if you need something more powerful? Like writing your own custom source, transform, or sink?
Vector plans to solve this with [WASM][urls.wasm] foreign module support, enabling you to extend Vector in the language of your choice with little performance penalty. ❤️ WASM. Check out the [WASM Foreign Module Support RFC][urls.pr_2341] for more info." + }, { + title: "Dynamic HTTP Rate-Limiting (AIMD)" + description: "Fiddling with rate-limits is a frustrating endeavor. If you set them too high you risk overwhelming the service; too low and you're unnecessarily limiting throughput. And what happens if you deploy a few more Vector instances? Vector is planning to solve this by automatically detecting the optimal rate limits, taking inspiration from TCP congestion control algorithms. Check out the [Dynamic HTTP Rate Limiting RFC][urls.pr_2329]." + }, { + title: "Vector Observability" + description: """ + Did you know this release includes a new hidden `internal` source that emits metrics reflecting Vector's own internal state? 👀 We didn't highlight this source because we have not finished instrumenting Vector and we'll be changing the internal schema used. + + Fun fact, we took an event-driven approach to Vector's internal observability, which we strongly believe in. We want Vector to be a good example of our own advice. Check out the [Event-Driven Observability RFC][urls.pr_2093]. + """ + }] + commits: [{ + sha: "177bd7de7e38a24c4a6092c85b8f5eb9d0f5386f" + date: "2020-02-25 21:55:37 +0000" + description: "Allow setting individual `log_schema.*` options" + pr_number: 1923 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 11 + deletions_count: 1 + }, { + sha: "12cc1ca32d56853a99ff6916dcca49466e881dc4" + date: "2020-02-26 10:31:46 +0000" + description: "Restructure getting started guide" + pr_number: 1854 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 198 + deletions_count: 78 + }, { + sha: "fb4bab3b95e6e5eecb7a1a4a5838c76b519f1edb" + date: "2020-02-26 14:07:37 +0000" + description: "Add behavior test for the `merge` transform" + pr_number: 1820 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 71 + deletions_count: 0 + }, { + sha: "33d529feb01e37e98bec3761604a1ca125b3c3df" + date: "2020-02-26 08:43:47 +0000" + description: "Add TLS support to socket, syslog, and vector sources" + pr_number: 1892 + scopes: ["socket source", "syslog source", "vector source"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 18 + insertions_count: 890 + deletions_count: 50 + }, { + sha: "38864efa4152a1f44154431bf250ddf1c67b91c6" + date: "2020-02-26 18:00:02 +0000" + description: "Replace `flatten`/`unflatten` by native nesting" + pr_number: 1902 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 35 + insertions_count: 1083 + deletions_count: 662 + }, { + sha: "d610ee18801f7306be49841b93ccc02aac926d75" + date: "2020-02-26 19:09:59 +0000" + description: "Upgrade rdkafka to 0.23.1" + pr_number: 1928 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 4 + insertions_count: 94 + deletions_count: 33 + }, { + sha: "271bcbd13d81e728b814d1273784147e5647a2b5" + date: "2020-02-26 20:08:19 +0000" + description: "Upgrade lib/file-source to futures 0.3" + pr_number: 1934 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 4 + insertions_count: 14 +
deletions_count: 14 + }, { + sha: "0b92159518732a27837ca1142884ab9a7d34de84" + date: "2020-02-26 20:35:49 +0000" + description: "Rename futures to futures01" + pr_number: 1933 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 87 + insertions_count: 231 + deletions_count: 229 + }, { + sha: "45b88d47ea8f01605d8e2e5ba7cf8f011f637853" + date: "2020-02-26 12:07:44 +0000" + description: "Rewrite to use HttpSink and JsonArrayBuffer" + pr_number: 1925 + scopes: ["gcp_pubsub sink"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 81 + deletions_count: 114 + }, { + sha: "6b5a5e238f266286227695b488a4266f1fd73bba" + date: "2020-02-26 15:01:18 +0000" + description: "Allow transforms to implement `Stream`" + pr_number: 1938 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 24 + deletions_count: 8 + }, { + sha: "5df7c74c6206ddc33afe3c1840e62258bae9baf2" + date: "2020-02-26 19:48:41 +0000" + description: "Improve Vector README to clarify purpose and position" + pr_number: 1943 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 216 + deletions_count: 72 + }, { + sha: "c9a4fcfe9f9e3e042497a94259dc492dd62b3eef" + date: "2020-02-27 11:32:48 +0000" + description: "Add advanced configs guide" + pr_number: 1774 + scopes: [] + type: "docs" + breaking_change: false + author: "Ashley Jeffs" + files_count: 4 + insertions_count: 182 + deletions_count: 0 + }, { + sha: "b6d197a5b959ce45cd8a4c952c360bfa1c6c22c0" + date: "2020-02-27 11:51:30 +0000" + description: "Add docs for NixOS" + pr_number: 1946 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 14 + insertions_count: 323 + deletions_count: 2 + }, { + sha: "5e02ace8c08e9eb703b63908ada9a7663f686faa" + date: "2020-02-28 09:27:46 +0000" + description: "Increase test logs size" + pr_number: 1949 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 27 + deletions_count: 22 + }, { + sha: "1fca6dcd0cd7f94785448128eb2f06d9dccc0bd4" + date: "2020-02-28 08:35:00 +0000" + description: "Default to `check_fields` condition when specifying conditions" + pr_number: 1947 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 18 + insertions_count: 30 + deletions_count: 41 + }, { + sha: "5ecbc2843ce8f61553943837782e296031b025e1" + date: "2020-02-28 10:47:04 +0000" + description: "Add `is_log` and `is_metric` conditions" + pr_number: 1950 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 8 + insertions_count: 177 + deletions_count: 15 + }, { + sha: "a2b1ef7479c3b49b9734be9dd0f9438ad343a478" + date: "2020-02-28 10:30:59 +0000" + description: "Add `--log-format` CLI option" + pr_number: 1908 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 60 + deletions_count: 14 + }, { + sha: "f07a3d22c9bba01612cc330abaff928df7dce8b2" + date: "2020-02-28 19:01:49 +0000" + description: "Create feature flags for all components enabling custom Vector builds" + pr_number: 1924 + scopes: ["operations"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 32 + insertions_count: 619 + deletions_count: 71 + }, { + sha: 
"fdae428c4b8aca0c748ba20ff08478062d327a4b" + date: "2020-02-28 20:48:55 +0000" + description: "Add TEST_LOG to test-stable CI job" + pr_number: 1937 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 2 + insertions_count: 5 + deletions_count: 0 + }, { + sha: "e4c83635166e777e53e4813448258c7fbcd6820a" + date: "2020-02-28 13:03:15 +0000" + description: "Check kube and docker code" + pr_number: 1914 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 2 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "560fd106fc9a60c12ddf2c32e31ad4f2031ff1f5" + date: "2020-02-28 21:55:08 +0000" + description: "Upgrade to Rust 1.41.1" + pr_number: 1958 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "1944ae977dd4143004434589ce4f27cb2714cbaf" + date: "2020-03-02 18:34:25 +0000" + description: "Initial `http` source implementation" + pr_number: 1650 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "gedkins" + files_count: 16 + insertions_count: 887 + deletions_count: 5 + }, { + sha: "e3cf8a7ef7712b054e0e37ca2caaa8e7933678ff" + date: "2020-03-03 13:24:09 +0000" + description: "Remove Google Analytics" + pr_number: 1976 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 0 + deletions_count: 3 + }, { + sha: "9d6c7e3288e013aedd672c7832b46e1291ea01ec" + date: "2020-03-03 14:16:28 +0000" + description: "Add Privacy Policy" + pr_number: 1977 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 8 + insertions_count: 95 + deletions_count: 4 + }, { + sha: "8d93004cf95163695eb83778090c18369f7c3bf0" + date: "2020-03-04 09:14:00 +0000" + description: "Add `--fragment` flag to `generate` subcmd" + pr_number: 1956 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 40 + deletions_count: 12 + }, { + sha: "f81b205a82976e46a2dfe7e3f4422ae1563f51d7" + date: "2020-03-04 09:14:51 +0000" + description: "Allow names in `generate` subcmd expression" + pr_number: 1955 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 65 + deletions_count: 12 + }, { + sha: "f5663be4b0d90809ef9d73dc18d26844d82f500e" + date: "2020-03-04 15:53:57 +0000" + description: "Add `check-component-features` target to the main Makefile" + pr_number: 1981 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 3 + deletions_count: 0 + }, { + sha: "b22e4d39a60b3e35e605d1f3997c32e14886853d" + date: "2020-03-04 11:09:59 +0000" + description: "Add TLS support" + pr_number: 1968 + scopes: ["logplex source", "splunk_hec source"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 15 + insertions_count: 731 + deletions_count: 196 + }, { + sha: "23968607e2382561e94b0f5f56b634bf02b046bd" + date: "2020-03-04 18:36:16 +0000" + description: "Add new `encoding.only_fields` and `encoding.except_fields` options" + pr_number: 1915 + scopes: ["sinks"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 72 + insertions_count: 4148 + deletions_count: 652 + }, { + sha: "af7756389127cf51acdab175f67c573af6ef921d" + date: "2020-03-05 12:48:59 
+0000" + description: "Add RFC process" + pr_number: 1961 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 140 + deletions_count: 21 + }, { + sha: "9f793a7fc929a346ae726fce19efd7279b6f241e" + date: "2020-03-06 10:40:32 +0000" + description: "Move all TLS support over to openssl" + pr_number: 1986 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 36 + insertions_count: 776 + deletions_count: 374 + }, { + sha: "179801c8c09c06dd0759291815aca21a1ca794c5" + date: "2020-03-07 00:49:04 +0000" + description: "Describe dot notation in the docs" + pr_number: 1996 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 69 + deletions_count: 8 + }, { + sha: "3beb67e10c355af2d7bb69bba2fc1b56ac11aa85" + date: "2020-03-09 15:35:03 +0000" + description: "Add `contains` and `prefix` predicates" + pr_number: 1997 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ashley Jeffs" + files_count: 5 + insertions_count: 295 + deletions_count: 4 + }, { + sha: "50a171b19ccb86a8f3559975e154033a627e8c3f" + date: "2020-03-09 13:49:27 +0000" + description: "Update examples to use inline TOML" + pr_number: 2004 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 158 + insertions_count: 3362 + deletions_count: 2841 + }, { + sha: "eeffdb08cad3f0956a8ff6079aeb5a57ebf5220c" + date: "2020-03-09 12:21:42 +0000" + description: "Add support for compression" + pr_number: 1969 + scopes: ["kafka sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 14 + insertions_count: 172 + deletions_count: 87 + }, { + sha: "c86708675d1356fcbc512ec15174bf12d842c121" + date: "2020-03-09 17:57:39 +0000" + description: "Cleanup field requirements" + pr_number: 2013 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 98 + insertions_count: 1908 + deletions_count: 1882 + }, { + sha: "33c6345b1026021b5a94b932c42f20c97c66e8d8" + date: "2020-03-09 21:07:19 +0000" + description: "Fix examples and clarify nested behavior" + pr_number: 1905 + scopes: ["rename_fields transform"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 31 + insertions_count: 485 + deletions_count: 221 + }, { + sha: "303ab5362eec135a5858c3caaa4f43f8f048a1b1" + date: "2020-03-10 10:18:26 +0000" + description: "Pull fresh containers on test" + pr_number: 2019 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "aa523b00977c89a44d7cbb17e7e7d0bf3e179c1a" + date: "2020-03-10 19:30:09 +0000" + description: "Fix race condition in test" + pr_number: 2026 + scopes: ["statsd sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 7 + deletions_count: 0 + }, { + sha: "0eb5023fc3136d97fad0353ff74fc865b24be96e" + date: "2020-03-10 22:10:08 +0000" + description: "Refactor to use util/HttpSource" + pr_number: 2014 + scopes: ["logplex source"] + type: "chore" + breaking_change: false + author: "Bill" + files_count: 1 + insertions_count: 70 + deletions_count: 52 + }, { + sha: "532c048e8cbced42b2699b6120f0981eff73c58e" + date: "2020-03-10 20:44:45 +0000" + description: "Initial `dedupe` transform implementation" + pr_number: 1848 + scopes: ["new transform"] + type: "feat" + 
breaking_change: false + author: "Spencer T Brody" + files_count: 15 + insertions_count: 1108 + deletions_count: 2 + }, { + sha: "b2d303d0f8c004dd0d9437c35f11eb9554bf4093" + date: "2020-03-10 21:53:22 +0000" + description: "Initial 'tag_cardinality_limit' transform implementation" + pr_number: 1959 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Spencer T Brody" + files_count: 14 + insertions_count: 1066 + deletions_count: 2 + }, { + sha: "dec0430fddf2a6603dc4d6fc08b2f45e7ccaa974" + date: "2020-03-11 11:47:26 +0000" + description: "Reshuffle timeout in test" + pr_number: 2035 + scopes: ["statsd sink"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 8 + deletions_count: 6 + }, { + sha: "0d9c2c98b8dfcb24ee08c0d0e536238208fe44dd" + date: "2020-03-11 12:13:28 +0000" + description: "Refactor internal http client" + pr_number: 2029 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 18 + insertions_count: 490 + deletions_count: 333 + }, { + sha: "6be3986d8d451f6630a679cd180d7434f706d66a" + date: "2020-03-11 10:44:28 +0000" + description: "Add support for TLS" + pr_number: 2025 + scopes: ["vector sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 322 + deletions_count: 13 + }, { + sha: "8e4986d21497a0dcc2ad87e517df6a50feb23086" + date: "2020-03-11 20:38:22 +0000" + description: "Add PR checklist" + pr_number: 2010 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 3 + insertions_count: 29 + deletions_count: 0 + }, { + sha: "1de3f2f2900c63390cde2a81a28c51ff6652ab7c" + date: "2020-03-12 11:38:53 +0000" + description: "Initial `papertrail` sink implementation" + pr_number: 1835 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 15 + insertions_count: 773 + deletions_count: 1 + }, { + sha: "f12df1571869a4014c39eff4b413555c0a54e05d" + date: "2020-03-12 11:52:29 +0000" + description: "Upgrade to rustc `1.42.0`" + pr_number: 2043 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "9e5e20f1c7667d1a890e7effcb83ebf86881dd62" + date: "2020-03-12 13:42:55 +0000" + description: "Add markdown linting" + pr_number: 2020 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 136 + insertions_count: 4009 + deletions_count: 1191 + }, { + sha: "31ee4126f750b1cd182baeab70fde05d88c139d9" + date: "2020-03-12 14:02:30 +0000" + description: "Add `timeout` query parameter" + pr_number: 2038 + scopes: ["elasticsearch sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "4d5f36e2f9816b42591dc6f0fba4baa1f6f82d6f" + date: "2020-03-12 14:03:30 +0000" + description: "Rename `host_field` to `host_key`" + pr_number: 2037 + scopes: ["splunk_hec sink"] + type: "chore" + breaking_change: true + author: "Bruce Guenter" + files_count: 4 + insertions_count: 53 + deletions_count: 9 + }, { + sha: "73ad2d1195fc14665cb3df2abe62fa9eb3c53112" + date: "2020-03-12 14:43:28 +0000" + description: "Fix the papertrail docs" + pr_number: 2051 + scopes: ["papertrail sink"] + type: "docs" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 32 + deletions_count: 5 + }, { 
+ sha: "3f33a2d7943f483fc1806c26247c339fbec89fc6" + date: "2020-03-12 16:44:31 +0000" + description: "Check lockfile over cargo.toml" + pr_number: 2050 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "18986f2af9eca178d422dc8eab7e66672c2a5337" + date: "2020-03-12 17:41:38 +0000" + description: "Re-run doc generation" + pr_number: 2052 + scopes: ["docs"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "23f9fa7cfa3621765b6ddcfb690cd0d2e1182bae" + date: "2020-03-13 18:03:36 +0000" + description: "Add `target` directory to `.markdownlintignore`" + pr_number: 2054 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "122778d90ffb7644e6811543696c845a1fb0417d" + date: "2020-03-13 09:44:02 +0000" + description: "Add new `key_field` option" + pr_number: 2039 + scopes: ["sampler transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 6 + insertions_count: 112 + deletions_count: 16 + }, { + sha: "034790b921a763b2f78b1b220e415ca560fc0622" + date: "2020-03-13 22:03:09 +0000" + description: "Fix for \"download source code\" step in install from source" + pr_number: 2027 + scopes: [] + type: "docs" + breaking_change: false + author: "Yuriy Vasiyarov" + files_count: 2 + insertions_count: 10 + deletions_count: 22 + }, { + sha: "ffdbacb509d9057c74bbea0ae446f3fd7433a319" + date: "2020-03-13 21:10:42 +0000" + description: "Improve syslog source example" + pr_number: 2059 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 92 + deletions_count: 14 + }, { + sha: "f3f8567876192202e60c5ac377b98b76a04aba49" + date: "2020-03-14 14:27:59 +0000" + description: "Fix `check-component-features` test" + pr_number: 2061 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "163e001ca0a9de48da31ffd1e4013fb13e6027e3" + date: "2020-03-14 10:37:35 +0000" + description: "Initial `honeycomb` sink implementation" + pr_number: 1847 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 14 + insertions_count: 1102 + deletions_count: 1 + }, { + sha: "e4104549ee1c0bd56e3978adf458dfdd5c7c6506" + date: "2020-03-16 10:24:29 +0000" + description: "Force `symlink` re-evaluation when detecting a changed Vector config" + pr_number: 2034 + scopes: ["config"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 67 + deletions_count: 15 + }, { + sha: "874c0c079538ce39c8259e1ca477fab49d3acddd" + date: "2020-03-16 15:12:43 +0000" + description: "Add `version` configuration option" + pr_number: 2056 + scopes: ["lua transform"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 9 + insertions_count: 755 + deletions_count: 38 + }, { + sha: "606dedbb689c9480c3724df92040edf5217d1b8d" + date: "2020-03-16 11:46:51 +0000" + description: "Part 1 of `HttpSink` refactor" + pr_number: 2072 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 5 + insertions_count: 286 + deletions_count: 290 + }, { + sha: "21405209c288ef78fe91ccdb4948b972b5d23b5a" + 
date: "2020-03-16 12:23:29 +0000" + description: "Ensure white-space dooes not break parsing" + pr_number: 2060 + scopes: ["json_parser transform"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 19 + deletions_count: 0 + }, { + sha: "b237576ceb416a58ec85e8eaefc1b075ef7d1a52" + date: "2020-03-16 20:00:01 +0000" + description: "Fix insertion to arrays in field path notation" + pr_number: 2062 + scopes: ["transforms"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 35 + deletions_count: 7 + }, { + sha: "7ecec0efc404a326f93d315df07d49632d1bd752" + date: "2020-03-16 20:17:20 +0000" + description: "Support nested fields and arrays" + pr_number: 1936 + scopes: ["merge transform"] + type: "fix" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 90 + deletions_count: 0 + }, { + sha: "e598e1f5cfa35171da0873e36e65e462f8260701" + date: "2020-03-17 00:30:16 +0000" + description: "Update tagline" + pr_number: 2079 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 7 + insertions_count: 9 + deletions_count: 9 + }, { + sha: "c8a5e4f3631a9f483cfcfcb2f25c0d3de6480feb" + date: "2020-03-17 14:48:09 +0000" + description: "RFC #1999 - 2020-03-06 - API extensions for `lua` transform" + pr_number: 2000 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 702 + deletions_count: 0 + }, { + sha: "7dacc02c954d55810e1cfed00e76745f23747f69" + date: "2020-03-17 17:01:59 +0000" + description: "Support escaping in the field path notation" + pr_number: 2081 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 22 + deletions_count: 0 + }, { + sha: "45a5cf7b96f39327f5a511d546518dbcb05eba7a" + date: "2020-03-17 17:13:48 +0000" + description: "Upgrade `regex` dependency" + pr_number: 2083 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "d93c7240ecbcf0b338df60182c8909ab79d1367c" + date: "2020-03-17 10:23:35 +0000" + description: "Consolidate \"Configuration\" and \"Options\" sections" + pr_number: 2085 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 133 + insertions_count: 301 + deletions_count: 522 + }, { + sha: "b9483a9edbb7633ce6b6de2af427b48bef71dee6" + date: "2020-03-17 10:26:05 +0000" + description: "Upgrade dependencies" + pr_number: 2086 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 94 + deletions_count: 89 + }, { + sha: "dd55d9d51e2d3b5b12ac63cadd30613b946afa05" + date: "2020-03-17 11:28:55 +0000" + description: "Part 2 of `HttpSink` refactor" + pr_number: 2075 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 228 + deletions_count: 212 + }, { + sha: "07c42a100ca147aef93fa72fdddaca4a2dafd51c" + date: "2020-03-17 09:34:09 +0000" + description: "Fix handling of message array data" + pr_number: 2053 + scopes: ["journald source"] + type: "fix" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 67 + deletions_count: 22 + }, { + sha: "ff7b0fb416878ec40b163d2e2ee5aaf5464e3f15" + date: "2020-03-17 19:46:47 +0000" + description: "Use `--release` Cargo 
flag in `make build`" + pr_number: 2087 + scopes: ["setup"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "56bad9d02543fa0861df5262afcb59b09f12580c" + date: "2020-03-18 16:23:11 +0000" + description: "Pin tag for `loki` Docker image" + pr_number: 2091 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "ccd3cf7f08fdad22ca5cbdacf4647e29b4b83798" + date: "2020-03-19 02:02:55 +0000" + description: "Switch to `tokio-compat` to start the transition to our new async runtime" + pr_number: 1922 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 9 + insertions_count: 117 + deletions_count: 71 + }, { + sha: "e596cc1046496640ef6e4e38b58d5cb93cea5760" + date: "2020-03-18 16:39:41 +0000" + description: "Refactor source shutdown and make it two-phase" + pr_number: 1994 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Spencer T Brody" + files_count: 27 + insertions_count: 875 + deletions_count: 68 + }, { + sha: "b8f144527bda15e98e4e91d42d8ab6306169af85" + date: "2020-03-19 18:12:52 +0000" + description: "Add `*_flat` methods for log events" + pr_number: 2082 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 38 + deletions_count: 0 + }, { + sha: "9716e0bbf8d401e53fbb08492f8fa27611a8d889" + date: "2020-03-19 19:50:46 +0000" + description: "Upgrade file sink for tokio-compat" + pr_number: 1988 + scopes: ["file sink"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 12 + insertions_count: 620 + deletions_count: 478 + }, { + sha: "ba2ae6b9352c7499c6858a0257e3f8dd01bb522c" + date: "2020-03-19 10:41:35 +0000" + description: "Allow native TOML tables" + pr_number: 2068 + scopes: ["add_fields transform"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 54 + deletions_count: 50 + }, { + sha: "99bb140b345e7a1a51dfb2b3596cd9897c7721d2" + date: "2020-03-19 22:40:58 +0000" + description: "Correct doctest at src/stream.rs" + pr_number: 2097 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "07343ac9d7ad06e9704e5a12b8d96c873a300162" + date: "2020-03-19 23:30:59 +0000" + description: "Upgrade console sink for tokio 0.2" + pr_number: 2096 + scopes: ["console sink"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 8 + insertions_count: 75 + deletions_count: 34 + }, { + sha: "c8b47b3230a4b790d8cda0e47265771666581f98" + date: "2020-03-19 23:13:15 +0000" + description: "Initial `pulsar` sink implementation" + pr_number: 1665 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Evan Cameron" + files_count: 17 + insertions_count: 914 + deletions_count: 1 + }, { + sha: "c800c2c9b166dc78ade84dc22c152c1d0a0fff76" + date: "2020-03-20 20:39:37 +0000" + description: "Correct Cargo.lock" + pr_number: 2105 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "14c0a632ae09ce22cc3818c5f5ad9f09023571dc" + date: "2020-03-20 11:56:59 +0000" + description: "Add new `drop_empty` option to drop empty objects automatically" + pr_number: 2077 + scopes: ["remove_fields transform", 
"rename_fields transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 11 + insertions_count: 321 + deletions_count: 48 + }, { + sha: "9d6be3193dfbb9624d9549137802a7abdde8c40a" + date: "2020-03-20 21:33:38 +0000" + description: "Fix `x86_64-pc-windows-msvc` build" + pr_number: 2103 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 6 + deletions_count: 4 + }, { + sha: "1d6928c28bbe0ca929bb00ff98d83dcdd3cf650c" + date: "2020-03-20 21:40:09 +0000" + description: "Name the time specifiers as `strftime` in the templating docs" + pr_number: 2107 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 15 + insertions_count: 22 + deletions_count: 22 + }, { + sha: "838a80bd486e2112f05725d4306dc68b8e5a70c3" + date: "2020-03-20 14:15:26 +0000" + description: "Make RunningTopology::stop() signal all sources to shut down using the new ShutdownCoordinator (1091)" + pr_number: 2098 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Spencer T Brody" + files_count: 6 + insertions_count: 255 + deletions_count: 157 + }, { + sha: "2675d77241d7c9d5f6b7203a7d4f95ca0a925991" + date: "2020-03-23 20:02:15 +0000" + description: "Racy buffering tests correction" + pr_number: 2106 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 5 + insertions_count: 439 + deletions_count: 298 + }, { + sha: "3e881bfa2dc6959928aedc0e64e576b86ed8f4b0" + date: "2020-03-23 13:50:48 +0000" + description: "Initial `datadog_logs` sink implementation" + pr_number: 1832 + scopes: ["new sink"] + type: "feat" + breaking_change: false + author: "Lucio Franco" + files_count: 16 + insertions_count: 1082 + deletions_count: 7 + }, { + sha: "ce7bdbaf7155c74f1a4f2e9fd5d50d2c3a92899f" + date: "2020-03-23 14:05:42 +0000" + description: "Add test harness GH action" + pr_number: 2028 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 142 + deletions_count: 0 + }, { + sha: "9be091bc2497b9df5af948d6d13ed952fe454477" + date: "2020-03-23 13:21:20 +0000" + description: "TLS reorganization" + pr_number: 2101 + scopes: [] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 17 + insertions_count: 586 + deletions_count: 473 + }, { + sha: "f1bb0c65d5230ea03905cf3741fa11f3ec281b47" + date: "2020-03-23 14:53:16 +0000" + description: "Rename `gcp_stackdriver_logging` sink to `gcp_stackdriver_logs`" + pr_number: 2121 + scopes: ["gcp_stackdrive_logging sink"] + type: "fix" + breaking_change: true + author: "Bruce Guenter" + files_count: 12 + insertions_count: 55 + deletions_count: 55 + }, { + sha: "ff4a6dccefc16f56910d49b996f797ef75de1759" + date: "2020-03-24 16:28:51 +0000" + description: "Support metric events in version 2" + pr_number: 2095 + scopes: ["lua transform"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 11 + insertions_count: 1254 + deletions_count: 130 + }, { + sha: "0088ebd712f97906bbf6894f0165385cf59a535b" + date: "2020-03-24 10:25:29 +0000" + description: "Revert \"Use MaybeTlsListener in TcpSource\"" + pr_number: 2129 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 3 + insertions_count: 62 + deletions_count: 47 + }, { + sha: "6edfbc9e7b498bdade9e52d5462b260f50f83792" + date: "2020-03-24 13:40:29 +0000" + description: 
"Make LogSchema more optional" + pr_number: 2113 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 15 + insertions_count: 92 + deletions_count: 65 + }, { + sha: "25e76f7e0590fa23d4efd17483949f5df80ca83b" + date: "2020-03-24 17:11:33 +0000" + description: "Add `User-agent` header to all outgoing HTTP requests" + pr_number: 2130 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 11 + deletions_count: 7 + }, { + sha: "2121b7dfa3cdaa342e8a7dbcafb8e3531c36f875" + date: "2020-03-25 01:59:52 +0000" + description: "Rename tokio -> tokio01 and tokio02 -> tokio" + pr_number: 2131 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 50 + insertions_count: 76 + deletions_count: 78 + }, { + sha: "2671cb5f82a89cea1c9d081c3051aa7123017a85" + date: "2020-03-24 21:08:00 +0000" + description: "Relax PR check list to only fire on important changes" + pr_number: 2136 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 0 + deletions_count: 6 + }, { + sha: "fe2e12ce89bc69657851a7a8f4dfeb3b4d334dc8" + date: "2020-03-25 16:31:59 +0000" + description: "Add new `index` option to specify custom Splunk indexes" + pr_number: 2127 + scopes: ["splunk_hec sink"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 100 + deletions_count: 7 + }, { + sha: "7ab9df34cdc8e7d0d6155bfbbd58aff61cfaad64" + date: "2020-03-25 20:07:09 +0000" + description: "Update `bumpalo` dependency in `Cargo.lock`" + pr_number: 2141 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "6443f9c7a6b376d662a2a59b59c433209c9eb872" + date: "2020-03-25 12:46:54 +0000" + description: "Fix handling of incoming TLS connections" + pr_number: 2146 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Bruce Guenter" + files_count: 7 + insertions_count: 205 + deletions_count: 136 + }, { + sha: "113bb800937be0f240b92fe95396a9740e7fbb15" + date: "2020-03-25 12:31:40 +0000" + description: "The `add_fields`, `remove_fields`, and `rename_fields` now produce a debug log when a field is replaced or non-existent" + pr_number: 2148 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Ana Hobden" + files_count: 17 + insertions_count: 102 + deletions_count: 42 + }, { + sha: "fc19c12027ebaf97764b5538c04e52974381b574" + date: "2020-03-25 20:18:54 +0000" + description: "Simplify domains for labels, etc" + pr_number: 2152 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 4 + insertions_count: 17 + deletions_count: 53 + }, { + sha: "0a5eccd17973dba291423ae3e151706aa82e1e7e" + date: "2020-03-26 17:42:42 +0000" + description: "Upstream github-script action at test harness workflow" + pr_number: 2157 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "a8ed1f7aa62266bd051c120d5f06cd4c105f8709" + date: "2020-03-26 21:09:49 +0000" + description: "Switch to upstream repo-permission-check-action" + pr_number: 2158 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 1 
+ }, { + sha: "16c714e190f9711810194bf9fd3f48c5730aec2f" + date: "2020-03-27 00:13:46 +0000" + description: "Add LogEvent::new" + pr_number: 2154 + scopes: [] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 6 + insertions_count: 34 + deletions_count: 38 + }, { + sha: "3cb486531d49e1702ea6bbf20c619c3d3476201a" + date: "2020-03-27 12:26:47 +0000" + description: "Increase default rate limit to allow for higher throughput" + pr_number: 2161 + scopes: ["aws_s3 sink"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 4 + insertions_count: 14 + deletions_count: 14 + }, { + sha: "068f289ffca61e5631237760c88deaf90ca0ab24" + date: "2020-03-27 10:49:02 +0000" + description: "Add support for UDP mode" + pr_number: 2162 + scopes: ["socket sink"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 304 + deletions_count: 40 + }, { + sha: "53ae00b610a526050a6f90121d91c3b100a386f7" + date: "2020-03-27 15:58:49 +0000" + description: "Remove package managers from install script" + pr_number: 2165 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 5 + deletions_count: 118 + }, { + sha: "de932f7f2a345923476bbaef1cbd44d91ce06870" + date: "2020-03-27 22:59:40 +0000" + description: "Support ARM architectures in the install script" + pr_number: 2167 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "f2f9b269d9f9dfe9b1d2af60821711c4c101cdd9" + date: "2020-03-28 19:08:04 +0000" + description: "Make config on the main page pass `vector validate`" + pr_number: 2168 + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 6 + deletions_count: 5 + }, { + sha: "49cac24a70f5530507bf50d977edbcecc9df2c4f" + date: "2020-03-28 20:32:11 +0000" + description: "Disable duplicated version output in `make version`" + pr_number: 2169 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "402e8c3ae0af6bbafa18aea729d684bca78f61cd" + date: "2020-03-29 19:52:23 +0000" + description: "Enable Kubernetes tests" + pr_number: 1970 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 9 + insertions_count: 215 + deletions_count: 8 + }, { + sha: "686919a34b89a295185ce5326855eb2534cd523c" + date: "2020-03-30 16:56:55 +0000" + description: "Use `UTC` instead of `Europe/London` in ClickHouse tests" + pr_number: 2178 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "9b5dd8b48ae4ce4930cac0328cd11ad48ad89e57" + date: "2020-03-30 10:20:08 +0000" + description: "Add support for `tls.verify_hostname` in HTTP based sinks" + pr_number: 2164 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 15 + insertions_count: 251 + deletions_count: 9 + }, { + sha: "8ac0fdebcbacf04a6bf72a3314f414520ef306b2" + date: "2020-03-30 10:46:59 +0000" + description: "Add `have: *` labels" + pr_number: 2179 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 112 + deletions_count: 0 + }, { + sha: "7b2ff837cf550f3721b8b187432d9e33229d7ea1" + 
date: "2020-03-30 11:17:11 +0000" + description: "Rename `prefix` condition predicate to `starts_with`" + pr_number: 2181 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 71 + deletions_count: 21 + }, { + sha: "e06aff56ac8cd7f041795262a812b74552bb96c7" + date: "2020-03-31 09:05:05 +0000" + description: "Add new `ends_with` condition predicate" + pr_number: 2183 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 150 + deletions_count: 0 + }, { + sha: "9b2d89572e54988a339c51bb26a47fb6205286e2" + date: "2020-03-31 13:33:44 +0000" + description: "Add new guides section" + pr_number: 2132 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1118 + insertions_count: 74832 + deletions_count: 6156 + }, { + sha: "1525ef63c32884069ddb782e1397cbb6892dc0b9" + date: "2020-03-31 17:27:41 +0000" + description: "Check for and log partial ingestion failures" + pr_number: 2185 + scopes: ["elasticsearch sink"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 180 + deletions_count: 44 + }, { + sha: "2463e1330237d98e0f111c796724423010b6b222" + date: "2020-03-31 18:08:24 +0000" + description: "Initial `filter` transform implementation" + pr_number: 2088 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Bruce Guenter" + files_count: 21 + insertions_count: 686 + deletions_count: 22 + }, { + sha: "ef86f521a4cf3dc63ae33902e3a43fb2e30c261d" + date: "2020-04-01 08:26:38 +0000" + description: "Deprecate in favor of the new `filter` transform" + pr_number: 2195 + scopes: ["field_filter transform"] + type: "enhancement" + breaking_change: false + author: "Binary Logic" + files_count: 13 + insertions_count: 12 + deletions_count: 326 + }, { + sha: "a73222e591eabf3e95a0467712bcdaf2855fae99" + date: "2020-04-01 10:29:17 +0000" + description: "Update to Docusaurus 2.0.0-alpha.49" + pr_number: 2196 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 443 + insertions_count: 1681 + deletions_count: 2029 + }, { + sha: "a2ea56c9cb152d458a14f562ada6c04f126b3ebc" + date: "2020-04-01 13:15:20 +0000" + description: "Refactor internal sinks and simplify" + pr_number: 2111 + scopes: ["sinks"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 38 + insertions_count: 1748 + deletions_count: 1245 + }, { + sha: "9add2cbe5357167393c4c8bb0b73a1377d58acda" + date: "2020-04-01 19:41:57 +0000" + description: "Inital `kubernetes_pod_metadata` transform implementation" + pr_number: 1888 + scopes: ["new transform"] + type: "feat" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 109 + insertions_count: 2229 + deletions_count: 2696 + }, { + sha: "9b8ba8edf4f857e0305eb1157bb0a6e8fe56490e" + date: "2020-04-01 14:24:12 +0000" + description: "Add new `regex` condition predicate" + pr_number: 2198 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 8 + insertions_count: 223 + deletions_count: 4 + }, { + sha: "029c0bf6cd0359f08bc7cef99eb4ca6502a52252" + date: "2020-04-01 14:25:13 +0000" + description: "Warn that old `field_filter` transform is deprecated" + pr_number: 2197 + scopes: ["observability"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 1 + insertions_count: 
4 + deletions_count: 0 + }, { + sha: "ddd54782d5eeac8f91c7841a3cb29d66ab169e95" + date: "2020-04-01 14:37:04 +0000" + description: "Add new `target_field` option" + pr_number: 2023 + scopes: ["regex_parser transform"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 5 + insertions_count: 289 + deletions_count: 79 + }, { + sha: "dd9e328faf7b41269cd87ce45b668c1dcd610f92" + date: "2020-04-02 00:13:22 +0000" + description: "Rename map_values_array to array_values_insertion_order test at discriminant" + pr_number: 2140 + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "45f3c09bc8bad8e05429b9c86eab5c34ed8f5928" + date: "2020-04-01 18:36:57 +0000" + description: "add event-driven observability rfc" + pr_number: 2093 + scopes: [] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 275 + deletions_count: 0 + }, { + sha: "f26b0add310476ad58eb80fcb43e240737fdb617" + date: "2020-04-02 09:56:34 +0000" + description: "Fix `make generate` command" + pr_number: 2205 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 2 + insertions_count: 1 + deletions_count: 2 + }, { + sha: "490f984763c33cf38d0b39545c8354109da22e38" + date: "2020-04-02 10:44:15 +0000" + description: "Add missing `compression` option and…" + pr_number: 2206 + scopes: ["elasticsearch sink"] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 159 + deletions_count: 30 + }, { + sha: "6d17361cadccafb470083cbd54ee84d68129b8ed" + date: "2020-04-02 11:56:52 +0000" + description: "Improve Windows Makefile support" + pr_number: 2150 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 12 + deletions_count: 4 + }, { + sha: "c38aeb298e47d34cc4b19af290fb8d6b46589c91" + date: "2020-04-02 23:26:59 +0000" + description: "Lift the internal thread limit to enable full concurrency" + pr_number: 2145 + scopes: [] + type: "perf" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 4 + deletions_count: 5 + }, { + sha: "5ec77297736c14b291d7b18bc425cf91fd8629e8" + date: "2020-04-02 21:54:54 +0000" + description: "minor fixes in Loki sink documentation" + pr_number: 2211 + scopes: [] + type: "docs" + breaking_change: false + author: "Brad Fritz" + files_count: 2 + insertions_count: 10 + deletions_count: 10 + }, { + sha: "1136cb5af597e10b6387c75451ee477c4aca45d4" + date: "2020-04-02 22:03:26 +0000" + description: "fix \"issie\" typo" + pr_number: 2212 + scopes: [] + type: "docs" + breaking_change: false + author: "Brad Fritz" + files_count: 20 + insertions_count: 38 + deletions_count: 38 + }, { + sha: "b3450b3be8775a830bac9e734c1bbb37820adf6a" + date: "2020-04-03 09:27:55 +0000" + description: "Detect TCP disconnects earlier" + pr_number: 2209 + scopes: ["networking"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 2 + insertions_count: 52 + deletions_count: 15 + }, { + sha: "8d110c0a03edb5faf02a8be8854d1a77148daee9" + date: "2020-04-03 10:55:15 +0000" + description: "disable flaky test" + pr_number: 2215 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Luke Steensen" + files_count: 1 + insertions_count: 1 + deletions_count: 0 + }, { + sha: "1d31f2bc76b20cd0da7ebc1b4838e8014078b061" + date: "2020-04-03 
20:43:35 +0000" + description: "Fix disk pressure " + pr_number: 2217 + scopes: ["tests"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "eb60ce21c16514c039e5ffe59edc24c5291ca752" + date: "2020-04-05 09:56:35 +0000" + description: "Fix sitemap URLs and CSS layouts" + pr_number: 2229 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 36 + insertions_count: 753 + deletions_count: 424 + }, { + sha: "906d6db0177ecfdb27020d0a85bcf4b65e43ee57" + date: "2020-04-05 11:54:15 +0000" + description: "RPM for aarch64 was using armv7" + pr_number: 2220 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Brad Fritz" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "e0a2451787fcca948cbc27fb0e25482a5ababa96" + date: "2020-04-05 12:28:19 +0000" + description: "Default `compression` to `none`" + pr_number: 2219 + scopes: ["elasticsearch sink"] + type: "fix" + breaking_change: true + author: "Bruce Guenter" + files_count: 154 + insertions_count: 1651 + deletions_count: 90 + }, { + sha: "30808b5d3bd6b2bde6aed454a61ee36642473262" + date: "2020-04-06 15:06:48 +0000" + description: "Add field tests" + pr_number: 2213 + scopes: ["kubernetes_pod_metadata transform"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 135 + deletions_count: 20 + }, { + sha: "22cff2e9b74f60ef5e2e486576e7f695695076b8" + date: "2020-04-06 20:18:28 +0000" + description: "Add `-qqq` command line flag to disable logs" + pr_number: 2230 + scopes: ["cli"] + type: "feat" + breaking_change: false + author: "Felix" + files_count: 3 + insertions_count: 20 + deletions_count: 15 + }, { + sha: "23d2ef5229a5a6979c0174e573960d3a1e101392" + date: "2020-04-06 15:00:24 +0000" + description: "Add basic module docs" + pr_number: 2233 + scopes: ["topology"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 8 + deletions_count: 0 + }, { + sha: "130df3199139bfa84648d235ed34a0c292f2cfa8" + date: "2020-04-06 15:47:59 +0000" + description: "Pass `tls` settings to healthcheck" + pr_number: 2234 + scopes: ["loki sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 2 + deletions_count: 1 + }, { + sha: "6dde571f89638ecc28f5c633415febff0729e198" + date: "2020-04-06 23:16:43 +0000" + description: "Don't use `buildx` plugin by default" + pr_number: 2239 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 25 + deletions_count: 13 + }, { + sha: "7d9ebc3d580eff9d12f31563306d0d8bf3285248" + date: "2020-04-06 16:38:58 +0000" + description: "Add verify cert notes" + pr_number: 2240 + scopes: ["security"] + type: "docs" + breaking_change: false + author: "Lucio Franco" + files_count: 31 + insertions_count: 216 + deletions_count: 54 + }, { + sha: "5890e2468e1d4c2a48e02c7d5235e9b9f65a29fc" + date: "2020-04-07 10:22:18 +0000" + description: "Pass `tls` settings to healthcheck" + pr_number: 2238 + scopes: ["influxdb_metrics sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 21 + deletions_count: 11 + }, { + sha: "0d0a8fab60439834e6be155f4c0b616bfd77faec" + date: "2020-04-07 18:34:59 +0000" + description: "Implement all hooks and timers in version 2 " + pr_number: 2126 + scopes: ["lua 
transform"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 85 + insertions_count: 5940 + deletions_count: 4060 + }, { + sha: "2f803293dbbe793c4a849b344feb5e4ae6d52e1b" + date: "2020-04-07 20:53:16 +0000" + description: "Support metric events" + pr_number: 2245 + scopes: ["vector source", "vector sink"] + type: "enhancement" + breaking_change: false + author: "Alexander Rodin" + files_count: 6 + insertions_count: 28 + deletions_count: 13 + }, { + sha: "e4c383881b7727289a87327fcd081b1f366952a6" + date: "2020-04-07 21:48:50 +0000" + description: "Fix link to the Docker platform on the main page" + pr_number: 2249 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "11a898c824ca2955c75a4d58e87081bfb4bb2f2e" + date: "2020-04-07 22:23:19 +0000" + description: "Document default value for `acl`" + pr_number: 2252 + scopes: ["gcp_cloud_storage sink"] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 3 + insertions_count: 5 + deletions_count: 4 + }, { + sha: "230cd45552021ea499d47d6b39f74730dbdc2a40" + date: "2020-04-07 15:31:22 +0000" + description: "Improve `logfmt_parser` docs and examples" + pr_number: 2251 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 25 + insertions_count: 834 + deletions_count: 525 + }, { + sha: "febbdfb39546c15a430b808226a03ad5a5bc41ec" + date: "2020-04-07 22:58:54 +0000" + description: "Use external tagging for metrics serialization" + pr_number: 2231 + scopes: ["ux"] + type: "enhancement" + breaking_change: true + author: "Alexander Rodin" + files_count: 7 + insertions_count: 183 + deletions_count: 232 + }, { + sha: "a9efeb9c8a45209c0fb64f0d043500a19f41fa7f" + date: "2020-04-08 17:10:31 +0000" + description: "Use cat with heredoc instead of echo and quotes" + pr_number: 2262 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "MOZGIII" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "f9c4b4ba1282d7cd4cc3b773305e611beac7a99f" + date: "2020-04-08 16:19:14 +0000" + description: "Add `source_key` and `source_type_key` to…" + pr_number: 2244 + scopes: ["config"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 6 + insertions_count: 198 + deletions_count: 2 + }, { + sha: "6b7cc0cf1009ecb1c62c7d1458945203a9ea5f48" + date: "2020-04-08 22:52:35 +0000" + description: "Use kebab case for ACL encoding" + pr_number: 2259 + scopes: ["gcp_cloud_storage sink"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "5111f3a5b522fa06ca2ddf321e668246a78dc9fa" + date: "2020-04-08 15:23:05 +0000" + description: "Initial `internal` metrics source implementation" + pr_number: 1953 + scopes: ["new source"] + type: "feat" + breaking_change: false + author: "Luke Steensen" + files_count: 38 + insertions_count: 1332 + deletions_count: 654 + }, { + sha: "308351a572c5e881454c6d73b8a9d6c92eac9a9e" + date: "2020-04-08 16:42:27 +0000" + description: "Accept metrics as input" + pr_number: 2267 + scopes: ["filter transform"] + type: "enhancement" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 34 + deletions_count: 15 + }, { + sha: "04e2bf3abef0af3a9398f6c97f917c3658fd993f" + date: "2020-04-08 23:54:49 +0000" + description: "Fix healthchecks" + pr_number: 2276 + 
scopes: ["aws_kinesis_firehose sink"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 13 + deletions_count: 16 + }, { + sha: "162e064e6c57c7b38d010893c041728ba40adb0a" + date: "2020-04-08 18:53:35 +0000" + description: "Only run required requests based on the supplied `fields`" + pr_number: 2265 + scopes: ["aws_ec2_metadata transform"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 35 + deletions_count: 48 + }, { + sha: "e9192ad2b2755c3bd1ee59f6b13f255ac4efd8f3" + date: "2020-04-09 12:40:00 +0000" + description: "Set `search_dirs` to config dirs by default" + pr_number: 2274 + scopes: ["lua transform"] + type: "enhancement" + breaking_change: true + author: "Alexander Rodin" + files_count: 7 + insertions_count: 42 + deletions_count: 13 + }, { + sha: "90f55ff7ae3222c05971482b72c4715d29a83ac9" + date: "2020-04-09 14:19:17 +0000" + description: "Add blog post on how we test vector" + pr_number: 1363 + scopes: [] + type: "docs" + breaking_change: false + author: "Luke Steensen" + files_count: 14 + insertions_count: 492 + deletions_count: 19 + }, { + sha: "0b1ca4efe31e04d2c7c4e61e40112833dc3eb372" + date: "2020-04-09 15:12:30 +0000" + description: "Make ACL optional" + pr_number: 2283 + scopes: ["gcp_cloud_storage sink"] + type: "enhancement" + breaking_change: true + author: "Bruce Guenter" + files_count: 4 + insertions_count: 18 + deletions_count: 12 + }, { + sha: "6d4d3b48b1563ef30f312eb7e2a4ef4f9df84652" + date: "2020-04-10 09:41:25 +0000" + description: "Add elegant shutdown behavior" + pr_number: 2260 + scopes: ["docker source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 14 + deletions_count: 5 + }, { + sha: "5f78ebb7c3e21f878859b72dff6450c45c7359dc" + date: "2020-04-10 13:13:35 +0000" + description: "Make `RuntimeTransform` trait available for all transforms" + pr_number: 2281 + scopes: ["transforms"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 13 + deletions_count: 8 + }, { + sha: "e0805cc76343a218ed610f84c174a184f39098e8" + date: "2020-04-10 09:01:14 +0000" + description: "correct typo in testing post" + pr_number: 2291 + scopes: ["website"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 2 + insertions_count: 5 + deletions_count: 5 + }, { + sha: "eb3e71e53699bbd8de90018c4dc0f803048a4178" + date: "2020-04-10 11:02:44 +0000" + description: "small fixes for testing post" + pr_number: 2292 + scopes: ["website"] + type: "fix" + breaking_change: false + author: "Luke Steensen" + files_count: 5 + insertions_count: 8 + deletions_count: 7 + }, { + sha: "a85197d8a1dfeee7b705dd996516510d4dd60431" + date: "2020-04-10 14:16:16 +0000" + description: "Run healthcheck in rt context" + pr_number: 2288 + scopes: ["kafka sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 3 + insertions_count: 45 + deletions_count: 7 + }, { + sha: "b6e53d52249370e8b5bd4006fca674c3c99b8302" + date: "2020-04-10 17:49:22 +0000" + description: "Start removing Atom from the hot path" + pr_number: 2295 + scopes: [] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 17 + insertions_count: 92 + deletions_count: 98 + }, { + sha: "9d97e1ea53a24fcba4dda28406a48b642a42db09" + date: "2020-04-11 02:32:14 +0000" + description: "Add a CSV parsing guide" + pr_number: 2285 + scopes: ["lua transform"] + 
type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 8 + insertions_count: 588 + deletions_count: 1 + }, { + sha: "b477981125866e6d3467cd12214e5049285c89b4" + date: "2020-04-11 23:21:27 +0000" + description: "Add fast path and caching api to event insert" + pr_number: 2296 + scopes: [] + type: "perf" + breaking_change: false + author: "Luke Steensen" + files_count: 5 + insertions_count: 67 + deletions_count: 13 + }, { + sha: "bc48a21e95a9802a5cf57326b4be1c383c567e27" + date: "2020-04-13 15:26:12 +0000" + description: "Enable Rust codegen LTO for test harness" + pr_number: 2310 + scopes: ["tests"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "e4d012df9210a22fe95729f8f98127e8cb050ece" + date: "2020-04-13 13:26:56 +0000" + description: "Upgrade http client to hyper 0.13" + pr_number: 2294 + scopes: ["networking"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 6 + insertions_count: 582 + deletions_count: 25 + }, { + sha: "23f1781f39d98315106455309a3251aaa88eeff0" + date: "2020-04-13 14:15:47 +0000" + description: "Fix Windows metrics" + pr_number: 2309 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 0 + deletions_count: 1 + }, { + sha: "dfe4620245bffddc066ccfe47a4ab9d70e6d9649" + date: "2020-04-14 13:20:55 +0000" + description: "Add `source_type` field" + pr_number: 2298 + scopes: ["sources"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 17 + insertions_count: 213 + deletions_count: 7 + }, { + sha: "fb8ae10f193c1990259500941ddd90f01472063e" + date: "2020-04-14 19:11:17 +0000" + description: "Add `lto = true` and `codegen-units=1` to `Cargo.toml`" + pr_number: 2321 + scopes: [] + type: "perf" + breaking_change: false + author: "Alexander Rodin" + files_count: 4 + insertions_count: 5 + deletions_count: 22 + }, { + sha: "e8723f5eda7cbf0986b8f4b45c9ac394ef2b6f3b" + date: "2020-04-14 19:44:31 +0000" + description: "Sort events by timestamp" + pr_number: 2320 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 2 + insertions_count: 78 + deletions_count: 2 + }, { + sha: "5d7421f07080bfd7e7563ebb326f4b0281d65508" + date: "2020-04-14 20:50:16 +0000" + description: "Add /highlights section and rework release notes" + pr_number: 2317 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 302 + insertions_count: 7552 + deletions_count: 3485 + }, { + sha: "1b4c0014247e69aec6761e544bfae6cd29f6acae" + date: "2020-04-15 06:32:47 +0000" + description: "Wire ShutdownSignal in `trait HttpSource`" + pr_number: 2290 + scopes: ["sources"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 15 + deletions_count: 16 + }, { + sha: "b43fdecfed76081b86cf2d04a933787d33bd5455" + date: "2020-04-15 06:35:56 +0000" + description: "Show possible values for `--log-format`, `--color`, and `--format`" + pr_number: 2300 + scopes: ["cli"] + type: "enhancement" + breaking_change: false + author: "Felix" + files_count: 4 + insertions_count: 47 + deletions_count: 23 + }, { + sha: "6cb49ce04aefe69dfdac6e489001990a31c7343a" + date: "2020-04-15 07:36:24 +0000" + description: "Clarify dependencies of the `package-*` job in `make hel…" + pr_number: 2324 + scopes: [] + type: "chore" + 
breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 2 + deletions_count: 2 + }, { + sha: "36892068ca48b470f6a16f35e1a57753074f32ca" + date: "2020-04-15 06:41:11 +0000" + description: "Remove `log_schema.source_key`" + pr_number: 2297 + scopes: ["config"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 2 + deletions_count: 112 + }, { + sha: "e2eb9e7da20337e3522e9f4ad5ca9b049e8c26e2" + date: "2020-04-14 21:41:47 +0000" + description: "Add Twitter author tags" + pr_number: 2299 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 6 + deletions_count: 0 + }, { + sha: "b152b53e8ec709c0ff9eb64e64d0f8b9a4cb0441" + date: "2020-04-15 18:21:52 +0000" + description: "Improve topology shutdown logs" + pr_number: 2345 + scopes: ["topology"] + type: "enhancement" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 13 + deletions_count: 2 + }, { + sha: "de0d6f6c4e6c23f4fea460835b22d7ecc175fde4" + date: "2020-04-15 18:42:27 +0000" + description: "Reduce credentials fetch timeout" + pr_number: 2342 + scopes: ["sinks"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 13 + deletions_count: 6 + }, { + sha: "148abb54c4b62c375369f96e33f8f0bfea620c41" + date: "2020-04-16 16:29:15 +0000" + description: "Don't require `isdst`, `wday`, `yday` in timestamps" + pr_number: 2335 + scopes: ["lua transform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 3 + }, { + sha: "60385ec2dd58ecbb8d449dea50981dd9afc3f84e" + date: "2020-04-16 09:32:47 +0000" + description: "Bump `tower-limit 0.1.2`" + pr_number: 2346 + scopes: ["dependencies"] + type: "chore" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 3 + deletions_count: 3 + }, { + sha: "f14ba50cbd3bad22b3aa5aadd60f83ccf7319ded" + date: "2020-04-16 16:33:15 +0000" + description: "Use only the filename part of the paths to extract dates" + pr_number: 2347 + scopes: [] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }, { + sha: "19bff5ca961d304744b2586f27ac24e3616a7024" + date: "2020-04-16 21:56:11 +0000" + description: "Add a guide about merging multi-line events" + pr_number: 2354 + scopes: ["lua transform"] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 1321 + deletions_count: 0 + }, { + sha: "225bee0537660cfa84d33cedce48e0926c533672" + date: "2020-04-17 10:50:47 +0000" + description: "Add highlights section to contributing" + pr_number: 2348 + scopes: [] + type: "docs" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 50 + deletions_count: 0 + }, { + sha: "b94373a614fdfb0a8baf231450d44fb0155b99c6" + date: "2020-04-17 16:55:17 +0000" + description: "fix typo 'desigend'" + pr_number: 2350 + scopes: [] + type: "docs" + breaking_change: false + author: "Erwan D" + files_count: 134 + insertions_count: 267 + deletions_count: 267 + }, { + sha: "d77a2f59e7ad18a28d4be90cc3da40e8ca49fe4f" + date: "2020-04-17 17:57:38 +0000" + description: "Cache paths for parsed fields" + pr_number: 2325 + scopes: ["tokenizer transform"] + type: "perf" + breaking_change: false + author: "Alexander Rodin" + files_count: 2 + insertions_count: 9 + deletions_count: 
7 + }, { + sha: "7c6608df02259778aaa3cc7770d96c6aabd75669" + date: "2020-04-17 11:57:04 +0000" + description: "Update Ana bio" + pr_number: 2360 + scopes: ["website"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 2 + insertions_count: 9 + deletions_count: 5 + }, { + sha: "72da605fd619c0321d9c8ffd176680f225ca88d5" + date: "2020-04-17 12:06:36 +0000" + description: "Migrate some PR checks to Github Actions from Circle" + pr_number: 2352 + scopes: ["platforms"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 5 + insertions_count: 205 + deletions_count: 120 + }, { + sha: "6b95f7e17e4af25c689a108cd3fa6941d97d5593" + date: "2020-04-17 13:47:27 +0000" + description: "Add support for instance accounts" + pr_number: 2351 + scopes: ["gcp provider"] + type: "enhancement" + breaking_change: false + author: "Bruce Guenter" + files_count: 11 + insertions_count: 114 + deletions_count: 39 + }, { + sha: "7ea0eed5ee4c24df66efba60403ecf0b5486ebb4" + date: "2020-04-18 11:35:30 +0000" + description: "Wire in new `ShutdownSignal` for improved shutdown" + pr_number: 2261 + scopes: ["file source"] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 4 + insertions_count: 139 + deletions_count: 94 + }, { + sha: "9c9def9d7d9f6d2319ec31241632f521d8e597d4" + date: "2020-04-18 10:29:23 +0000" + description: "Allow creation race of groups and streams" + pr_number: 2355 + scopes: ["aws_cloudwatch_logs sink"] + type: "fix" + breaking_change: false + author: "Lucio Franco" + files_count: 1 + insertions_count: 26 + deletions_count: 2 + }, { + sha: "f119d617293e81a2f7093dac549cd8ac5c26303f" + date: "2020-04-18 15:47:29 +0000" + description: "Only run CI workflow when relevant files change" + pr_number: 2369 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 65 + deletions_count: 60 + }, { + sha: "32e73588a7e2b3e9fd80f3eb0184339405b044fc" + date: "2020-04-19 12:48:00 +0000" + description: "Fix tests" + pr_number: 2372 + scopes: [] + type: "chore" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 14 + deletions_count: 4 + }, { + sha: "fde51c235f6d3c205a34a768c9df9e3f5e537dd5" + date: "2020-04-19 21:21:23 +0000" + description: "Add nightly GH action workflow" + pr_number: 2370 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 246 + deletions_count: 86 + }, { + sha: "c59eedf0943f7b02295c75907fbf3024bd884f11" + date: "2020-04-19 21:37:28 +0000" + description: "Move /test-data to /tests/data" + pr_number: 2375 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 6 + insertions_count: 4 + deletions_count: 5 + }, { + sha: "e041007b0849c8de0e1b80272e1875d413a5aad8" + date: "2020-04-20 12:57:02 +0000" + description: "Run ci workflow on pull requests only" + pr_number: 2378 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 1 + insertions_count: 16 + deletions_count: 1 + }, { + sha: "8442531f7190e30820be157be21be77bd9c27a15" + date: "2020-04-20 19:15:22 +0000" + description: "Wire in new `ShutdownSignal` for improved shutdown" + pr_number: 2373 + scopes: ["splunk_hec source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 1 + insertions_count: 22 + deletions_count: 50 + }, { + sha: 
"806f24664147d71ab866ef156d9f3762bac7992c" + date: "2020-04-20 19:17:28 +0000" + description: "Wire in `ShutdownSignal` for improved shutdown" + pr_number: 2364 + scopes: ["journald source"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 3 + insertions_count: 57 + deletions_count: 52 + }, { + sha: "512cb2ebb3f85997ef5b9966e1b501d531125c35" + date: "2020-04-20 19:20:14 +0000" + description: "Wire in new `ShutdownSignal` for improved shutdown" + pr_number: 2365 + scopes: ["sources"] + type: "enhancement" + breaking_change: false + author: "Kruno Tomola Fabro" + files_count: 7 + insertions_count: 50 + deletions_count: 18 + }, { + sha: "b7af16744363667c75948cbbf1b197a21f237324" + date: "2020-04-20 10:55:19 +0000" + description: "Reenable component feature check" + pr_number: 2371 + scopes: ["platforms"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 3 + insertions_count: 36 + deletions_count: 12 + }, { + sha: "ec30c6166b69e2a91514440347abf090a52fd6e9" + date: "2020-04-20 13:21:43 +0000" + description: "support sub-second timestamp resolution" + pr_number: 2333 + scopes: ["lua transform"] + type: "fix" + breaking_change: false + author: "Joe Schafer" + files_count: 4 + insertions_count: 77 + deletions_count: 9 + }, { + sha: "74126709debf66fc576a1a064244be98e223a0a6" + date: "2020-04-20 13:29:30 +0000" + description: "Optimize binaries in `test-stable` CI to reduce binary sizes. This should make it more stable." + pr_number: 2380 + scopes: ["operations"] + type: "chore" + breaking_change: false + author: "Ana Hobden" + files_count: 1 + insertions_count: 14 + deletions_count: 0 + }, { + sha: "514f8b1ed5596691e4f083657a61ffe935b06b7f" + date: "2020-04-20 16:29:54 +0000" + description: "Prepare 0.9.0" + pr_number: 2353 + scopes: [] + type: "chore" + breaking_change: false + author: "Binary Logic" + files_count: 119 + insertions_count: 27658 + deletions_count: 16436 + }] + } + "0.8.2": { + date: "2020-03-06" + codename: "" + whats_next: [] + commits: [{ + sha: "361f5d1688a1573e9794c4decb0aec26e731de70" + date: "2020-03-05 09:25:36 +0000" + description: "Enable file sink in generate subcmd" + pr_number: 1989 + scopes: ["cli"] + type: "fix" + breaking_change: false + author: "Ashley Jeffs" + files_count: 1 + insertions_count: 5 + deletions_count: 1 + }, { + sha: "b709ce7a15e1b42bcaae765902968158b10567ac" + date: "2020-03-06 11:37:19 +0000" + description: "Explicitly call GC in `lua` transform" + pr_number: 1990 + scopes: ["lua transform"] + type: "fix" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 25 + deletions_count: 8 + }, { + sha: "bc81e26f137de5a7ff2b8f893d7839a2052bb8a8" + date: "2020-03-06 12:26:59 +0000" + description: "Fix broken links" + pr_number: null + scopes: [] + type: "docs" + breaking_change: false + author: "Alexander Rodin" + files_count: 5 + insertions_count: 9 + deletions_count: 7 + }, { + sha: "ee998b2078c7019481a25881ee71764e1260c6a5" + date: "2020-03-06 12:51:52 +0000" + description: "Use new Homebrew installer in CI" + pr_number: null + scopes: ["testing"] + type: "chore" + breaking_change: false + author: "Alexander Rodin" + files_count: 1 + insertions_count: 1 + deletions_count: 1 + }] + } + #Release: { + codename: string + date: string + commits: [{ + author: string + breaking_change: bool + date: =~"^\\d{4}-\\d{2}-\\d{2}" + description: string + deletions_count: >=0 & int + files_count: >=0 & int + insertions_count: >=0 & int + pr_number: >=0 & int | 
null + scopes: [] + sha: =~"^[a-z0-9]{40}$" + type: "chore" | "docs" | "enhancement" | "feat" | "fix" | "perf" | "status" + }] + whats_next: _ + } +} +remap: { + #Characteristic: { + anchor: string + name: string + title: string + description: string + } + #Characteristics: {} + #Example: { + title: string + source: string + } + #Type: "any" | "array" | "boolean" | "float" | "integer" | "object" | "null" | "path" | "string" | "regex" | "timestamp" + #Literal: { + anchor: string + name: string + title: string + description: string + characteristics: {} + examples: [string] + } + #Argument: { + name: string + description: string + required: bool + multiple: false + type: ["any" | "array" | "boolean" | "float" | "integer" | "object" | "null" | "path" | "string" | "regex" | "timestamp"] + } + #Function: { + anchor: string + name: string + category: "Array" | "Codec" | "Coerce" | "Debug" | "Enumerate" | "Event" | "Hash" | "IP" | "Number" | "Object" | "Parse" | "Random" | "String" | "System" | "Timestamp" | "Type" + description: string + notices: [] + arguments: [] + return: { + types: ["any" | "array" | "boolean" | "float" | "integer" | "object" | "null" | "path" | "string" | "regex" | "timestamp"] + } + internal_failure_reasons: [] + } + #FunctionCategory: "Array" | "Codec" | "Coerce" | "Debug" | "Enumerate" | "Event" | "Hash" | "IP" | "Number" | "Object" | "Parse" | "Random" | "String" | "System" | "Timestamp" | "Type" + #Error: { + anchor: "\(code)" + code: uint & >=100 & <1000 + description: string + rationale: string | null + resolution: string + title: string + examples: [{ + title: string + source: string + }] + } + #Principle: { + anchor: string + name: string + title: string + description: string + } + #Concept: { + anchor: string + name: string + title: string + description: string + characteristics: {} + } + #Expression: { + anchor: string + name: string + title: string + description: string + return: string + examples: [{ + title: string + source: string + }] + } + #Grammar: { + source: string + definitions: {} + } + #Feature: { + anchor: string + name: string + title: string + description: string + principles: { + performance: bool + safety: bool + } + characteristics: {} + } + #Syntax: { + anchor: string + name: string + title: string + description: string + } + concepts: { + event: { + anchor: "event" + name: "event" + title: "Event" + description: """ + VRL programs operate on observability [events](https://vector.dev/docs/about/under-the-hood/architecture/data-model/). This VRL program, for example, adds + a field to a log event: + + ```vrl + .new_field = "new value" + ``` + + The event at hand is the entire context of the VRL program. + """ + characteristics: { + path: { + anchor: "path" + name: "path" + title: "Paths" + description: """ + [Path expressions](https://vector.dev/docs/reference/vrl/expressions/#path) enable you to access values inside the event: + + ```vrl + .kubernetes.pod_id + ``` + """ + } + } + } + expression: { + anchor: "expression" + name: "expression" + title: "Expression" + description: """ + VRL is an expression-oriented language. A VRL program consists entirely of [expressions](urls.vrl_expressions), + with every expression returning a value. + """ + characteristics: {} + } + literal: { + anchor: "literal" + name: "literal" + title: "Literal" + description: """ + As in most other languages, [literals](https://vector.dev/docs/reference/vrl/expressions/#literal-expressions) in VRL are values written exactly as they are meant + to be interpreted. 
Literals include things like strings, Booleans, and integers. + """ + characteristics: {} + } + program: { + anchor: "program" + name: "program" + title: "Program" + description: """ + A VRL program is the highest-level unit of computation. A program is the end result of combining an arbitrary + number of [expressions](#expression) operating on a single observability event. + """ + characteristics: {} + } + function: { + anchor: "function" + name: "function" + title: "Function" + description: """ + Like most languages, VRL includes [functions](https://vector.dev/docs/reference/vrl/functions/) that represent named procedures designed to + accomplish specific tasks. Functions are the highest-level construct of reusable code in VRL, which, for the + sake of simplicity, doesn't include modules, classes, or other complex constructs for organizing functions. + """ + characteristics: { + fallibility: { + anchor: "fallibility" + name: "fallibility" + title: "Fallibility" + description: """ + Some VRL functions are *fallible*, meaning that they can error. Any potential errors thrown by fallible + functions must be handled, a requirement enforced at compile time. + + This feature of VRL programs, which we call [fail safety](https://vector.dev/docs/reference/vrl/#fail-safety), is a defining + characteristic of VRL and a primary source of its safety guarantees. + """ + } + } + } + } + description: """ + **Vector Remap Language** (VRL) is an expression-oriented language designed for transforming observability data + (logs and metrics) in a [safe](https://vector.dev/docs/reference/vrl/#safety) and [performant](https://vector.dev/docs/reference/vrl/#performance) manner. It + features a simple [syntax](https://vector.dev/docs/reference/vrl/expressions/) and a rich set of built-in + [functions](https://vector.dev/docs/reference/vrl/functions/) tailored specifically to observability use cases. + + You can use VRL in Vector via the [`remap` transform](https://vector.dev/docs/reference/transforms/remap/), and for a more in-depth + picture, see the [announcement blog post](https://vector.dev/blog/vector-remap-language). + """ + errors: { + "104": { + anchor: "104" + code: 104 + title: "Unnecessary error assignment" + description: """ + The left-hand side of an [assignment expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) + needlessly handles errors even though the right-hand side _can't_ fail. + """ + rationale: """ + Assigning errors when one is not possible is effectively dead code that makes your program difficult to follow. + Removing the error assignment simplifies your program. + """ + resolution: "Remove the error assignment." + examples: [{ + title: "Unnecessary error assignment (strings)" + source: ".message, err = downcase(.message)" + diff: """ + -.message, err = downcase(.message) + +.message = downcase(.message) + """ + }] + } + "106": { + anchor: "106" + code: 106 + title: "Function argument arity mismatch" + description: """ + A [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) invokes a + function with too many arguments. + """ + rationale: null + resolution: "Remove the extra arguments to adhere to the function's documented signature." 
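(Aside: a minimal VRL sketch of how error 106 surfaces in practice; the `.message` field and the use of `parse_json` are illustrative assumptions, not part of the vector data above.)

```vrl
# parse_json takes a single required `value` argument; passing a second,
# undocumented argument raises error 106 at compile time:
#   parse_json(.message, pretty: true)   # error 106
# Dropping the extra argument matches the documented signature; the
# remaining runtime fallibility is raised here with `!`.
.parsed = parse_json!(.message)
```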
+ examples: [{ + title: "Function argument arity mismatch" + source: "parse_json(.message, pretty: true)" + diff: """ + -parse_json(.message, pretty: true) + +parse_json(.message) + """ + }] + } + "100": { + anchor: "100" + code: 100 + title: "Unhandled root runtime error" + description: """ + A root expression is fallible and its [runtime error](https://vector.dev/docs/reference/vrl/errors/#runtime-errors) isn't handled in the VRL + program. + """ + rationale: """ + VRL is [fail safe](https://vector.dev/docs/reference/vrl/#fail-safety) and thus requires that all possible runtime errors be handled. + This provides important [safety guarantees](https://vector.dev/docs/reference/vrl/#safety) to VRL and helps to ensure that VRL programs + run reliably when deployed. + """ + resolution: """ + [Handle](https://vector.dev/docs/reference/vrl/errors/#handling) the runtime error by [assigning](https://vector.dev/docs/reference/vrl/errors/#assigning), + [coalescing](https://vector.dev/docs/reference/vrl/errors/#coalesing), or [raising](https://vector.dev/docs/reference/vrl/errors/#raising) the + error. + """ + examples: [{ + title: "Unhandled root runtime error (assigning)" + source: "get_env_var(\"HOST\")" + diff: """ + - \tget_env_var("HOST") + +# \t.host = get_env_var("HOST") + """ + }] + } + "110": { + anchor: "110" + code: 110 + title: "Invalid argument type" + description: """ + An argument passed to a [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) + isn't a supported type. + """ + rationale: """ + VRL is [type safe](https://vector.dev/docs/reference/vrl/#type-safety) and requires that types align upon compilation. This provides + important [safety guarantees](https://vector.dev/docs/reference/vrl/#safety) to VRL and helps to ensure that VRL programs run reliably when + deployed. + """ + resolution: """ + You must guarantee the type of the variable by using the appropriate [type](\\(urls.vrl_functions)#type) or + [coercion](\\(urls.vrl_functions)#coerce) function. + """ + examples: [{ + title: "Invalid argument type (guard with defaults)" + source: "downcase(.message)" + diff: """ + +.message = string(.message) ?? "" + downcase(.message) + """ + }, { + title: "Invalid argument type (guard with errors)" + source: "downcase(.message)" + diff: " downcase(string!(.message))" + }, { + title: "Invalid argument type (guard with if expressions)" + source: "downcase(.message)" + diff: """ + +if is_string(.message) { + \tdowncase(.message) + + } + """ + }] + } + "103": { + anchor: "103" + code: 103 + title: "Unhandled assignment runtime error" + description: """ + The right-hand side of an [assignment expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) + is fallible and can produce a [runtime error](https://vector.dev/docs/reference/vrl/errors/#runtime-errors), but the error isn't being + [handled](https://vector.dev/docs/reference/vrl/errors/#handling). + """ + rationale: """ + VRL is [fail safe](https://vector.dev/docs/reference/vrl/#fail-safety) and thus requires that all possible runtime errors be handled. + This provides important [safety guarantees](https://vector.dev/docs/reference/vrl/#safety) to VRL and helps to ensure that VRL programs + run reliably when deployed. 
+ """ + resolution: """ + [Handle](https://vector.dev/docs/reference/vrl/errors/#handling) the runtime error by [assigning](https://vector.dev/docs/reference/vrl/errors/#assigning), + [coalescing](https://vector.dev/docs/reference/vrl/errors/#coalesing), or [raising](https://vector.dev/docs/reference/vrl/errors/#raising) the + error. + """ + examples: [{ + title: "Unhandled assignment runtime error (coalescing)" + input: { + log: { + message: "key=value" + } + } + diff: """ + -. |= parse_key_value(.message) + +. |= parse_key_value(.message) ?? {} + """ + source: ". |= parse_key_value(.message)" + }, { + title: "Unhandled assignment runtime error (raising)" + input: { + log: { + message: "key=value" + } + } + diff: """ + -. |= parse_key_value(.message) + +. |= parse_key_value!(.message) + """ + source: ". |= parse_key_value(.message)" + }, { + title: "Unhandled assignment runtime error (assigning)" + input: { + log: { + message: "key=value" + } + } + diff: """ + -. |= parse_key_value(.message) + +., err |= parse_key_value(.message) + """ + source: ". |= parse_key_value(.message)" + }] + } + "101": { + anchor: "101" + code: 101 + title: "Malformed regex literal" + description: """ + A [regex literal expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) is malformed + and thus doesn't result in a valid regular expression. + """ + rationale: "Invalid regular expressions don't compile." + resolution: """ + Regular expressions are difficult to write and commonly result in syntax errors. If you're parsing a common + log format we recommend using one of VRL's [`parse_*` functions](https://vector.dev/docs/reference/vrl/functions/#parsing). If + you don't see a function for your format please [request it](https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature). Otherwise, use the + [Rust regex tester](https://rustexp.lpil.uk/) to test and correct your regular expression. + """ + examples: [{ + title: "Malformed regex literal (common format)" + source: ". |= parse_regex!(.message, r'^(?P[\\w\\.]+) - (?P[\\w]+) (?P[\\d]+) \\[?P.*)\\] \"(?P[\\w]+) (?P.*)\" (?P[\\d]+) (?P[\\d]+)$')" + diff: """ + -. |= parse_regex!(.message, r'^(?P[\\w\\.]+) - (?P[\\w]+) (?P[\\d]+) \\[?P.*)\\] "(?P[\\w]+) (?P.*)" (?P[\\d]+) (?P[\\d]+)$') + +. |= parse_common_log!(.message) + """ + }] + } + "102": { + anchor: "102" + code: 102 + title: "Non-boolean if expression predicate" + description: """ + An [if expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) predicate doesn't + evaluate to a Boolean. + """ + rationale: """ + VRL doesn't implement "truthy" values (non-Boolean values that resolve to a Boolean, such as `1`) since these + are common foot-guns that can result in unexpected behavior when used in if expressions. This provides important + [safety guarantees](https://vector.dev/docs/reference/vrl/#safety) in VRL and ensures that VRL programs are reliable once deployed. + """ + resolution: """ + Adjust your if expression predicate to resolve to a Boolean. Helpful functions to solve this include + [`exists`](https://vector.dev/docs/reference/vrl/functions/#exists) and + [`is_nullish`](https://vector.dev/docs/reference/vrl/functions/#is_nullish). + """ + examples: [{ + title: "Non-boolean if expression predicate (strings)" + input: { + log: { + message: "key=value" + } + } + source: """ + if .message { + \t. |= parse_key_value!(.message) + } + """ + diff: """ + -if .message { + +if exists(.message) { + \t. 
|= parse_key_value!(.message) + } + """ + }] + } + "105": { + anchor: "105" + code: 105 + title: "Undefined function" + description: """ + A [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) invokes an + unknown function. + """ + rationale: null + resolution: "This is typically due to a typo. Correcting the function name should resolve this." + examples: [{ + title: "Undefined function (typo)" + source: "parse_keyvalue(.message)" + diff: """ + -parse_keyvalue(.message) + +parse_key_value(.message) + """ + }] + } + "108": { + anchor: "108" + code: 108 + title: "Unknown function argument keyword" + description: """ + A [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) passes an + unknown named argument. + """ + rationale: null + resolution: "Correct the name to align with the documented argument names for the function." + examples: [{ + title: "Unknown function argument keyword" + source: "parse_timestamp(.timestamp, fmt: \"%D\")" + diff: """ + -parse_timestamp(.timestamp) + +parse_timestamp(.timestamp, format: "%D") + """ + }] + } + "107": { + anchor: "107" + code: 107 + title: "Required function argument missing" + description: """ + A [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) fails to pass + a required argument. + """ + rationale: null + resolution: "Supply all of the required function arguments to adhere to the function's documented signature." + examples: [{ + title: "Required function argument missing" + source: "parse_timestamp(.timestamp)" + diff: """ + -parse_timestamp(.timestamp) + +parse_timestamp(.timestamp, format: "%D") + """ + }] + } + "109": { + anchor: "109" + code: 109 + title: "Cannot abort function" + description: """ + A [function call expression](https://vector.dev/docs/reference/vrl/expressions/#regular_expression) can't end with + `!` unless it's _fallible_. If a function can't produce a runtime error, it doesn't have an abort variant + that ends with `!`. + """ + rationale: null + resolution: "Remove the `!` from the end of the function name." + examples: [{ + title: "Cannot abort function" + source: "downcase!(.message)" + diff: """ + -downcase!(.message) + +downcase(.message) + """ + }] + } + } + examples: [{ + title: "Parse Syslog logs" + input: { + log: { + message: "<102>1 2020-12-22T15:22:31.111Z vector-user.biz su 2666 ID389 - Something went wrong" + } + } + source: """ + structured = parse_syslog!(.message) + . = merge(., structured) + """ + output: { + log: { + appname: "su" + facility: "ntp" + hostname: "vector-user.biz" + message: "Something went wrong" + msgid: "ID389" + procid: 2666 + severity: "info" + timestamp: "2020-12-22T15:22:31.111Z" + } + } + notes: ["Attributes are coerced into their proper types, including `timestamp`."] + }, { + title: "Parse key/value (logfmt) logs" + input: { + log: { + message: "@timestamp=\"Sun Jan 10 16:47:39 EST 2021\" level=info msg=\"Stopping all fetchers\" tag#production=stopping_fetchers id=ConsumerFetcherManager-1382721708341 module=kafka.consumer.ConsumerFetcherManager" + } + } + source: """ + structured = parse_key_value!(.message) + . 
= merge(., structured) + """ + output: { + log: { + "@timestamp": "Sun Jan 10 16:47:39 EST 2021" + level: "info" + msg: "Stopping all fetchers" + "tag#production": "stopping_fetchers" + id: "ConsumerFetcherManager-1382721708341" + module: "kafka.consumer.ConsumerFetcherManager" + } + } + warnings: ["All attributes are strings and will require manual type coercing."] + }, { + title: "Parse custom logs" + input: { + log: { + message: "2021/01/20 06:39:15 [error] 17755#17755: *3569904 open() \"/usr/share/nginx/html/test.php\" failed (2: No such file or directory), client: xxx.xxx.xxx.xxx, server: localhost, request: \"GET /test.php HTTP/1.1\", host: \"yyy.yyy.yyy.yyy\"" + } + } + source: """ + structured = parse_regex!(.message, /^(?P<timestamp>\\d+/\\d+/\\d+ \\d+:\\d+:\\d+) \\[(?P<severity>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+):(?: \\*(?P<connid>\\d+))? (?P<message>.*)$/) + . = merge(., structured) + + # Coerce parsed fields + .timestamp = parse_timestamp(.timestamp, "%Y/%m/%d %H:%M:%S") ?? now() + .pid = to_int(.pid) + .tid = to_int(.tid) + + # Extract structured data + message_parts = split(.message, ", ", limit: 2) + structured = parse_key_value(message_parts[1], key_value_delimiter: ":", field_delimiter: ",") ?? {} + .message = message_parts[0] + . = merge(., structured) + """ + output: { + log: { + timestamp: "2021/01/20 06:39:15" + severity: "error" + pid: "17755" + tid: "17755" + connid: "3569904" + message: "open() \"/usr/share/nginx/html/test.php\" failed (2: No such file or directory)" + client: "xxx.xxx.xxx.xxx" + server: "localhost" + request: "GET /test.php HTTP/1.1" + host: "yyy.yyy.yyy.yyy" + } + } + }, { + title: "Multiple parsing strategies" + input: { + log: { + message: "<102>1 2020-12-22T15:22:31.111Z vector-user.biz su 2666 ID389 - Something went wrong" + } + } + source: """ + structured = + parse_syslog(.message) ?? + parse_common_log(.message) ?? + parse_regex!(.message, /^(?P<timestamp>\\d+/\\d+/\\d+ \\d+:\\d+:\\d+) \\[(?P<severity>\\w+)\\] (?P<pid>\\d+)#(?P<tid>\\d+):(?: \\*(?P<connid>\\d+))? (?P<message>.*)$/) + . 
= merge(., structured) + """ + output: { + log: { + appname: "su" + facility: "ntp" + hostname: "vector-user.biz" + message: "Something went wrong" + msgid: "ID389" + procid: 2666 + severity: "info" + timestamp: "2020-12-22 15:22:31.111 UTC" + } + } + }, { + title: "Modify metric tags" + input: { + metric: { + kind: "incremental" + name: "user_login_total" + counter: { + value: 102.0 + } + tags: { + host: "my.host.com" + instance_id: "abcd1234" + email: "vic@vector.dev" + } + } + } + source: """ + .environment = get_env_var!("ENV") # add + .hostname = del(.host) # rename + del(.email) + """ + output: { + metric: { + kind: "incremental" + name: "user_login_total" + counter: { + value: 102.0 + } + tags: { + environment: "production" + hostname: "my.host.com" + instance_id: "abcd1234" + } + } + } + }, { + title: "Invalid argument type" + input: { + log: { + not_a_string: 1 + } + } + source: "upcase(.not_a_string)" + raises: { + compiletime: """ + error: invalid argument type + ┌─ :1:1 + │ + 1 │ upcase(.not_a_string) + │ ^^^^^^^^^^^^^ + │ │ + │ this expression resolves to unknown type + │ but the parameter "value" expects the exact type "string" + │ + = see language documentation at: https://vector.dev/docs/reference/vrl/ + """ + } + }, { + title: "Unhandled error" + input: { + log: { + message: "key1=value1 key2=value2" + } + } + source: "structured = parse_key_value(.message)" + raises: { + compiletime: """ + error: unhandled error + ┌─ :1:1 + │ + 1 │ structured = parse_key_value(.message) + │ ^^^^^^^^^^ + │ │ + │ expression can result in runtime error + │ handle the error case to ensure runtime success + │ + = see error handling documentation at: https://vector.dev/docs/reference/vrl/errors/ + = see language documentation at: https://vector.dev/docs/reference/vrl/ + """ + } + }] + expressions: { + arithmetic: { + anchor: "arithmetic" + name: "arithmetic" + title: "Arithmetic" + description: """ + An _arithmetic_ expression performs an operation on two expressions (operands) as defined by the operator. + + Although arithmetic is commonly applied to numbers, you can use it with other types as well, such as strings. + """ + return: "Returns the result of the expression as defined by the operator." + grammar: { + source: "expression ~ operator ~ expression" + definitions: { + expression: { + name: "expression" + description: "The `expression` can be any expression that returns a valid type as defined by the `operator`." + } + operator: { + name: "operator" + description: "The `operator` defines the operation performed on the left-hand- and right-hand-side operands." + enum: { + "+": "Sum. Operates on `int`, `float`, and `string` types." + "-": "Difference. Operates on `int` and `float` types." + "*": "Multiplication. Operates on `int` and `float` types." + "/": "Float division. Operates on `int` and `float` types. _Always_ produces a `float`." + "//": "Integer division. Operates on `int` and `float` types. _Always_ produces an `int`." + "%": "Remainder. Operates on `int` and `float` types. _Always_ produces an `int`." + } + } + } + } + examples: [{ + title: "Sum (int)" + source: "1 + 1" + return: 2 + }, { + title: "Sum (float)" + source: "1.0 + 1.0" + return: 2.0 + }, { + title: "Sum (numeric)" + source: "1 + 1.0" + return: 2.0 + }, { + title: "Sum (string)" + source: "\"Hello\" + \", \" + \"World!\"" + return: "Hello, World!" 
+ }, { + title: "Difference (int)" + source: "2 - 1" + return: 1 + }, { + title: "Difference (float)" + source: "2.0 - 1.0" + return: 1.0 + }, { + title: "Difference (numeric)" + source: "2.0 - 1" + return: 1.0 + }, { + title: "Multiplication (int)" + source: "2 * 1" + return: 2 + }, { + title: "Multiplication (float)" + source: "2.0 * 1.0" + return: 2.0 + }, { + title: "Multiplication (numeric)" + source: "2.0 * 1" + return: 2.0 + }, { + title: "Float division (int)" + source: "2 / 1" + return: 2.0 + }, { + title: "Float division (float)" + source: "2.0 / 1.0" + return: 2.0 + }, { + title: "Float division (numeric)" + source: "2.0 / 1" + return: 2.0 + }, { + title: "Integer division (int)" + source: "2 / 1" + return: 2 + }, { + title: "Integer division (float)" + source: "2.0 / 1.0" + return: 2 + }, { + title: "Integer division (numeric)" + source: "2.0 / 1" + return: 2 + }, { + title: "Remainder" + source: "3 % 2" + return: 1 + }] + } + coalesce: { + anchor: "coalesce" + name: "coalesce" + title: "Coalesce" + description: """ + A _coalesce_ expression is composed of multiple expressions (operands) delimited by a coalesce operator, + short-circuiting on the first expression that doesn't violate the operator condition. + """ + return: "Returns the value of the first expression (operand) that doesn't violate the operator condition." + grammar: { + source: "expression ~ (operator ~ expression)+" + definitions: { + expression: { + name: "expression" + description: "The `expression` (operand) can be any expression." + } + operator: { + name: "operator" + description: "The `operator` delimits two or more `expression`s." + enum: { + "??": """ + The `??` operator performs error coalescing, short-circutiing on the first expression that + doesn't error and returning its result. + """ + } + } + } + } + examples: [{ + title: "Error coalescing" + source: "parse_syslog(\"not syslog\") ?? parse_apache_log(\"not apache\") ?? \"malformed\"" + return: "malformed" + }] + } + logical: { + anchor: "logical" + name: "logical" + title: "Logical" + description: """ + A _logical_ expression compares two expressions (operands), short-circuiting on the last expression evaluated + as defined by the operator. + """ + return: "Returns the last expression (operand) evaluated as defined by the operator." + grammar: { + source: "expression ~ operator ~ expression" + definitions: { + expression: { + name: "expression" + description: """ + The `expression` (operand) can be any expression that returns a valid type as defined by the + `operator`. + """ + } + operator: { + name: "operator" + description: "The `operator` defines the operation performed on the left-hand- and right-hand-side operations." + enum: { + "&&": "Conditional AND. Supports boolean expressions only." + "||": "Conditional OR. Supports any expression." + "!": "NOT. Supports boolean expressions only." + } + } + } + } + examples: [{ + title: "AND" + source: "true && true" + return: true + }, { + title: "OR (boolean)" + source: "false || \"foo\"" + return: "foo" + }, { + title: "OR (null)" + source: "null || \"foo\"" + return: "foo" + }, { + title: "NOT" + source: "!false" + return: true + }] + } + index: { + anchor: "index" + name: "index" + title: "Index" + description: "An _index_ expression denotes an element of an array. Array indices in VRL start at zero." + return: "Returns the element in the position of the supplied index." 
+ grammar: { + source: "\"[\" ~ index ~ \"]\"" + definitions: { + index: { + name: "index" + description: "The `index` represents the zero-based position of the element." + characteristics: { + zero_based: { + anchor: "zero_based" + name: "zero_based" + title: "Zero-based indices" + description: "Indexes are zero-based where `0` represents the first array element." + } + } + } + } + } + examples: [{ + title: "Array index expression" + input: { + log: { + array: ["first", "second"] + } + } + source: ".array[0]" + return: "first" + }] + } + block: { + anchor: "block" + name: "block" + title: "Block" + description: """ + A _block_ expression is a sequence of one or more expressions within matching brace brackets. + + Blocks can't be empty. Instead, empty blocks (`{}`) are treated as blank objects. + """ + return: "Returns the result of the last evaluated expression within the block." + grammar: { + source: "\"{\" ~ NEWLINE* ~ expressions ~ NEWLINE* ~ \"}\"" + definitions: { + expressions: { + name: "expressions" + description: "One or more expressions." + } + } + } + examples: [{ + title: "Simple block" + source: """ + { + \tmessage = "{\\"Hello\\": \\"World!\\"}" + \tparse_json!(message) + } + """ + return: { + Hello: "World!" + } + }, { + title: "Assignment block" + source: """ + .structured = { + \tmessage = "{\\"Hello\\": \\"World!\\"}" + \tparse_json!(message) + } + """ + return: { + Hello: "World!" + } + output: { + log: { + structured: { + Hello: "World!" + } + } + } + }] + } + comparison: { + anchor: "comparison" + name: "comparison" + title: "Comparison" + description: """ + A _comparison_ expression compares two expressions (operands) and produces a Boolean as defined by the + operator. + """ + return: "Returns a Boolean as defined by the operator." + grammar: { + source: "expression ~ operator ~ expression" + definitions: { + expression: { + name: "expression" + description: """ + The `expression` (operand) can be any expression that returns a valid type as defined by the + `operator`. + """ + } + operator: { + name: "operator" + description: "The `operator` defines the operation performed on the left-hand and right-hand side operands." + enum: { + "==": "Equal. Operates on all types." + "!=": "Not equal. Operates on all types." + ">=": "Greater than or equal. Operates on `int` and `float` types." + ">": "Greater than. Operates on `int` and `float` types." + "<=": "Less than or equal. Operates on `int` and `float` types." + "<": "Less than. Operates on `int` and `float` types." + } + } + } + } + examples: [{ + title: "Equal" + source: "1 == 1" + return: true + }, { + title: "Not equal" + source: "1 != 2" + return: true + }, { + title: "Greater than or equal" + source: "2 >= 2" + return: true + }, { + title: "Greater than" + source: "2 > 1" + return: true + }, { + title: "Less than or equal" + source: "2 <= 2" + return: true + }, { + title: "Less than" + source: "1 < 2" + return: true + }] + } + variable: { + anchor: "variable" + name: "variable" + title: "Variable" + description: """ + A _variable_ expression names variables. A variable is a sequence of one or more letters and digits. The first + character in a variable must be a letter. + """ + return: "Returns the value of the variable." + grammar: { + source: "first ~ (trailing)*" + definitions: { + first: { + name: "first" + description: "The `first` character must be an alphabetic character (`a-zA-Z`)." 
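(Aside: an illustrative sketch of the variable naming rules described above; the identifiers themselves are made up.)

```vrl
# A leading letter followed by letters, digits, and underscores is valid:
retry_count_2 = 1
# A leading digit is not a valid identifier:
#   2nd_count = 1   # parse error
```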
+ } + trailing: { + name: "trailing" + description: """ + The `trailing` characters must only contain ASCII alpha-numeric and underscore characters + (`a-zA-Z0-9_`). + """ + } + } + } + examples: [{ + title: "Simple variable" + source: "my_variable = 1" + return: 1 + }, { + title: "Variable with path" + source: """ + my_object = { "one": 1 } + my_object.one + """ + return: 1 + }] + } + function_call: { + anchor: "function_call" + name: "function_call" + title: "Function call" + description: "A _function call_ expression invokes built-in [VRL functions](https://vector.dev/docs/reference/vrl/functions/)." + return: """ + Returns the value of the function invocation if the invocation succeeds. If the invocation fails, the error must + be [handled](https://vector.dev/docs/reference/vrl/errors/) and null is returned. + + Functions can _only_ return a single value. If multiple values are relevant, you should wrap them in a data + structure fit to hold them, such as an array or object (note that VRL doesn't support tuples). + """ + grammar: { + source: "function ~ abort? ~ \"(\" ~ arguments? ~ \")\"" + definitions: { + function: { + name: "function" + description: "`function` represents the name of the built-in function." + } + abort: { + name: "abort" + description: """ + `abort` represents a literal `!` that can optionally be used with fallible functions to abort + the program when the function fails: + + ```vrl + result = f!() + ``` + + Otherwise, errors must be handled: + + ```vrl + result, err = f() + ``` + + Failure to handle errors from fallible functions results in compile-time errors. See the + [error reference](https://vector.dev/docs/reference/vrl/errors/) for more info. + """ + } + arguments: { + name: "arguments" + description: """ + The `arguments` are comma-delimited expressions that can optionally be prefixed with the + documented name. + """ + characteristics: { + named: { + anchor: "named" + name: "named" + title: "Named arguments" + description: """ + _All_ function arguments in VRL are assigned names, including required leading arguments. + Named arguments are suffixed with a colon (`:`), with the value following the name: + + ```vrl + argument_name: "value" + argument_name: (1 + 2) + ``` + + The value is treated as another expression. + """ + } + positional: { + anchor: "positional" + name: "positional" + title: "Positional arguments" + description: """ + Function calls support nameless positional arguments. Arguments must be supplied in the order + they are documented: + + ```vrl + f(1, 2) + ``` + """ + } + type_safety: { + anchor: "type_safety" + name: "type_safety" + title: "Argument type safety" + description: """ + Function arguments enforce type safety when the type of the value supplied is known: + + ```vrl + round("not a number") # fails at compile time + ``` + + If the type of the value is not known, you need to handle the potential argument error: + + ```vrl + number = int(.message) ?? 0 + round(number) + ``` + + See the [errors reference](https://vector.dev/docs/reference/vrl/errors/) for more info. 
+ """ + } + } + } + } + } + examples: [{ + title: "Positional function invocation" + source: "split(\"hello, world!\", \", \")" + return: ["hello", "world!"] + }, { + title: "Named function invocation (ordered)" + source: "split(\"hello, world!\", pattern: \", \")" + return: ["hello", "world!"] + }, { + title: "Named function invocation (unordered)" + source: "split(pattern: \", \", value: \"hello, world!\")" + return: ["hello", "world!"] + }] + } + if: { + anchor: "if" + name: "if" + title: "If" + description: """ + An _if_ expression specifies the conditional execution of two branches according to the value of a Boolean + expression. If the Boolean expression evaluates to `true`, the "if" branch is executed, otherwise the "else" + branch is executed (if present). + """ + return: "The result of the last expression evaluated in the executed branch or null if no expression is evaluated." + grammar: { + source: "\"if\" ~ predicate ~ block ~ (\"else if\" ~ predicate ~ block)* ~ (\"else\" ~ block)?" + definitions: { + predicate: { + name: "predicate" + description: """ + The `predicate` _must_ be an expression that resolves to a Boolean. If a Boolean isn't returned, a + compile-time error is raised. + """ + } + } + } + examples: [{ + title: "True if expression" + source: """ + if true { + \t"Hello, World!" + } + """ + return: "Hello, World!" + }, { + title: "False if expression" + source: """ + if false { + \t# not evaluated + } + """ + return: null + }, { + title: "If/else expression" + source: """ + if false { + \t# not evaluated + } else { + \t"Hello, World!" + } + """ + return: "Hello, World!" + }, { + title: "If/else if/else expression" + source: """ + if false { + \t# not evaluated + } else if false { + \t# not evaluated + } else { + \t"Hello, World!" + } + """ + return: "Hello, World!" + }] + } + assignment: { + anchor: "assignment" + name: "assignment" + title: "Assignment" + description: """ + An _assignment_ expression assigns the result of the right-hand-side expression to the left-hand-side + target (path or variable). + """ + return: """ + Returns the value of the right-hand-side expression only if the expression succeeds. If the expression errors, + the error must be [handled](https://vector.dev/docs/reference/vrl/errors/) and null is returned. + """ + grammar: { + source: "target ~ (\",\" ~ error)? ~ operator ~ expression" + definitions: { + target: { + name: "target" + description: """ + The `target` must be a path, + with an optional second variable for error handling if the right-hand side is fallible. + """ + } + error: { + name: "error" + description: """ + The `error` allows for optional assignment to errors when the right-hand-side expression is + fallible. This is commonly used when invoking fallible functions. + """ + } + operator: { + name: "operator" + description: "The `operator` delimits the `target` and `expression` and defines assignment conditions." + enum: { + "=": """ + Simple assignment operator. Assigns the result from the right-hand side to the left-hand side: + + ```vrl + .field = "value" + ``` + """ + "??=": """ + Assigns _only_ if the right-hand side doesn't error. This is useful when invoking fallible + functions on the right-hand side: + + ```vrl + .structured ??= parse_json(.message) + ``` + """ + } + } + expression: { + name: "expression" + description: """ + If the `target` is a variable, the `expression` can be any expression. + + If the `target` is a path, the `expression` can be any expression that returns a supported object + value type (i.e. 
+ not a regular expression).
+ """
+ }
+ }
+ }
+ examples: [{
+ title: "Path assignment"
+ source: ".message = \"Hello, World!\""
+ return: "Hello, World!"
+ output: {
+ log: {
+ message: "Hello, World!"
+ }
+ }
+ }, {
+ title: "Nested path assignment"
+ source: ".parent.child = \"Hello, World!\""
+ return: "Hello, World!"
+ output: {
+ log: {
+ parent: {
+ child: "Hello, World!"
+ }
+ }
+ }
+ }, {
+ title: "Double assignment"
+ source: ".first = .second = \"Hello, World!\""
+ return: "Hello, World!"
+ output: {
+ log: {
+ first: "Hello, World!"
+ second: "Hello, World!"
+ }
+ }
+ }, {
+ title: "Array element assignment"
+ source: ".array[1] = \"Hello, World!\""
+ return: "Hello, World!"
+ output: {
+ log: {
+ array: [null, "Hello, World!"]
+ }
+ }
+ }, {
+ title: "Variable assignment"
+ source: "my_variable = \"Hello, World!\""
+ return: "Hello, World!"
+ }, {
+ title: "Fallible assignment (success)"
+ source: ".parsed, .err = parse_json(\"{\\\"Hello\\\": \\\"World!\\\"}\")"
+ output: {
+ log: {
+ parsed: {
+ Hello: "World!"
+ }
+ err: null
+ }
+ }
+ }, {
+ title: "Fallible assignment (error)"
+ source: ".parsed, .err = parse_json(\"malformed\")"
+ output: {
+ log: {
+ parsed: null
+ err: "function call error for \"parse_json\" at (14:37): unable to parse json: expected value at line 1 column 1"
+ }
+ }
+ }]
+ }
+ path: {
+ anchor: "path"
+ name: "path"
+ title: "Path"
+ description: """
+ A _path_ expression is a sequence of period-delimited segments that represent the location of a value
+ within an object.
+ """
+ return: "Returns the value of the path location."
+ grammar: {
+ source: "\".\" ~ path_segments"
+ definitions: {
+ "\".\"": {
+ name: "\".\""
+ description: """
+ The `"."` character represents the root of the event. Therefore, _all_ paths must begin with the `.`
+ character, and `.` alone is a valid path.
+ """
+ }
+ path_segments: {
+ name: "path_segments"
+ description: """
+ `path_segments` denote a segment of a nested path. Each segment must be delimited by a `.` character
+ and only contain alpha-numeric characters and `_` (`a-zA-Z0-9_`). Segments that contain
+ characters outside of this range must be quoted.
+ """
+ characteristics: {
+ array_elements: {
+ anchor: "array_elements"
+ name: "array_elements"
+ title: "Array element paths"
+ description: """
+ Array elements can be accessed by their index:
+
+ ```vrl
+ .array[0]
+ ```
+ """
+ }
+ coalescing: {
+ anchor: "coalescing"
+ name: "coalescing"
+ title: "Path segment coalescing"
+ description: """
+ Path segments can be coalesced, allowing for the first non-null value to be used. This is
+ particularly useful when working with
+ [externally tagged](https://serde.rs/enum-representations.html#externally-tagged) data:
+
+ ```vrl
+ .grand_parent.(parent1 | parent2).child
+ ```
+ """
+ }
+ dynamic: {
+ anchor: "dynamic"
+ name: "dynamic"
+ title: "Dynamic paths"
+ description: "Dynamic paths are currently not supported."
+ }
+ nested_objects: {
+ anchor: "nested_objects"
+ name: "nested_objects"
+ title: "Nested object paths"
+ description: """
+ Nested object values are accessed by delimiting each ancestor path with `.`:
+
+ ```vrl
+ .parent.child
+ ```
+ """
+ }
+ nonexistent: {
+ anchor: "nonexistent"
+ name: "nonexistent"
+ title: "Non-existent paths"
+ description: "Non-existent paths resolve to `null`."
+ }
+ quoting: {
+ anchor: "quoting"
+ name: "quoting"
+ title: "Path quoting"
+ description: """
+ Path segments can be quoted to include special characters, such as spaces, periods, and
+ others:
+
+ ```vrl
+ ."parent.key.with.special \\"characters\\"".child
+ ```
+ """
+ }
+ valid_characters: {
+ anchor: "valid_characters"
+ name: "valid_characters"
+ title: "Valid path characters"
+ description: """
+ Path segments only allow for underscores and ASCII alpha-numeric characters
+ (`[a-zA-Z0-9_]`). Segments must be delimited with periods (`.`). If a segment contains
+ characters outside of this list it must be quoted.
+ """
+ }
+ }
+ }
+ }
+ }
+ examples: [{
+ title: "Root path"
+ input: {
+ log: {
+ message: "Hello, World!"
+ }
+ }
+ source: "."
+ return: {
+ message: "Hello, World!"
+ }
+ }, {
+ title: "Top-level path"
+ input: {
+ log: {
+ message: "Hello, World!"
+ }
+ }
+ source: ".message"
+ return: "Hello, World!"
+ }, {
+ title: "Nested path"
+ input: {
+ log: {
+ parent: {
+ child: "Hello, World!"
+ }
+ }
+ }
+ source: ".parent.child"
+ return: "Hello, World!"
+ }, {
+ title: "Nested path coalescing"
+ input: {
+ log: {
+ grand_parent: {
+ parent2: {
+ child: "Hello, World!"
+ }
+ }
+ }
+ }
+ source: ".grand_parent.(parent1 | parent2).child"
+ return: "Hello, World!"
+ }, {
+ title: "Array element path (first)"
+ input: {
+ log: {
+ array: ["first", "second"]
+ }
+ }
+ source: ".array[0]"
+ return: "first"
+ }, {
+ title: "Array element path (second)"
+ input: {
+ log: {
+ array: ["first", "second"]
+ }
+ }
+ source: ".array[1]"
+ return: "second"
+ }, {
+ title: "Quoted path"
+ input: {
+ log: {
+ "parent.key.with.special characters": {
+ child: "Hello, World!"
+ }
+ }
+ }
+ source: ".\"parent.key.with.special characters\".child"
+ return: "Hello, World!"
+ }]
+ }
+ }
+ features: {
+ memory_safety: {
+ anchor: "memory_safety"
+ name: "memory_safety"
+ title: "Memory safety"
+ description: """
+ VRL inherits Rust's [memory safety](https://en.wikipedia.org/wiki/Memory_safety) guarantees, protecting you from
+ [common software bugs and security vulnerabilities](https://thenewstack.io/microsoft-rust-is-the-industrys-best-chance-at-safe-systems-programming/) that stem from improper memory
+ access. This makes VRL ideal for infrastructure use cases, like observability pipelines, where reliability and
+ security are top concerns.
+ """
+ principles: {
+ performance: false
+ safety: true
+ }
+ characteristics: {}
+ }
+ native: {
+ anchor: "native"
+ name: "native"
+ title: "Vector & Rust native"
+ description: """
+ Like Vector, VRL is built with [Rust](https://www.rust-lang.org/) and compiles to native Rust code. Therefore, it inherits
+ Rust's safety and performance characteristics that make it ideal for observability pipelines. And because both
+ VRL and Vector are written in Rust, they are tightly integrated, avoiding communication inefficiencies such as
+ event serialization or [foreign function interfaces](https://en.wikipedia.org/wiki/Foreign_function_interface) (FFI). This makes VRL significantly faster
+ than non-Rust alternatives.
+ """
+ principles: {
+ performance: true
+ safety: true
+ }
+ characteristics: {
+ lack_of_gc: {
+ anchor: "lack_of_gc"
+ name: "lack_of_gc"
+ title: "Lack of garbage collection"
+ description: """
+ Rust's [affine type system](https://en.wikipedia.org/wiki/Substructural_type_system#Affine_type_systems) avoids the need for garbage collection, making
+ VRL exceptionally fast, memory efficient, and memory safe.
+ Memory is precisely allocated and freed,
+ avoiding the pauses and performance pitfalls associated with garbage collectors.
+ """
+ }
+ }
+ }
+ quality_error_messages: {
+ anchor: "quality_error_messages"
+ name: "quality_error_messages"
+ title: "Quality error messages"
+ description: """
+ VRL strives to provide high-quality, helpful error messages, streamlining the development and iteration workflow
+ around VRL programs.
+
+ This VRL program, for example...
+
+ ```vrl
+ parse_json!(1)
+ ```
+
+ ...would result in this error:
+
+ ```rust
+ error: program aborted
+ ┌─ :2:1
+ │
+ 2 │ parse_json!(1)
+ │ ^^^^^^^^^^^^^^
+ │ │
+ │ function call error
+ │ unable to parse json: key must be a string at line 1 column 3
+ │
+ = see function documentation at: https://master.vector.dev/docs/reference/vrl/functions/#parse_json
+ = see language documentation at: https://vector.dev/docs/reference/vrl/
+ ```
+ """
+ principles: {
+ performance: false
+ safety: false
+ }
+ characteristics: {}
+ }
+ fail_safety: {
+ anchor: "fail_safety"
+ name: "fail_safety"
+ title: "Fail-safety"
+ description: """
+ VRL programs are [fail safe](https://en.wikipedia.org/wiki/Fail-safe), meaning that a VRL program won't compile unless all errors
+ thrown by fallible functions are handled. This eliminates unexpected runtime errors that often plague production
+ observability pipelines with data loss and downtime. See the [error reference](https://vector.dev/docs/reference/vrl/errors/) for
+ more information on VRL errors.
+ """
+ principles: {
+ performance: false
+ safety: true
+ }
+ characteristics: {}
+ }
+ type_safety: {
+ anchor: "type_safety"
+ name: "type_safety"
+ title: "Type-safety"
+ description: """
+ VRL implements [_progressive_](#progressive) [type safety](https://en.wikipedia.org/wiki/Type_safety),
+ erroring at [compilation-time](#compilation) if a type mismatch is detected.
+ """
+ principles: {
+ performance: false
+ safety: false
+ }
+ characteristics: {
+ progressive: {
+ anchor: "progressive"
+ name: "progressive"
+ title: "Progressive type-safety"
+ description: """
+ VRL's type-safety is _progressive_, meaning it will implement type-safety for any value for which it
+ knows the type. Because observability data can be quite unpredictable, it's not always known which
+ type a field might be, hence the _progressive_ nature of VRL's type-safety. As VRL scripts are
+ evaluated, type information is built up and used at compile-time to enforce type-safety. Let's look
+ at an example:
+
+ ```vrl
+ .foo # any
+ .foo = downcase!(.foo) # string
+ .foo = upcase(.foo) # string
+ ```
+
+ Breaking down the above:
+
+ 1. The `.foo` field starts off as an `any` type (AKA unknown).
+ 2. The call to the `downcase!` function requires error handling (`!`) since VRL cannot guarantee that
+ `.foo` is a string (the only type supported by `downcase`).
+ 3. Afterwards, assuming the `downcase` invocation is successful, VRL knows that `.foo` is a string,
+ since `downcase` can only return strings.
+ 4. Finally, the call to `upcase` does not require error handling (`!`) since VRL knows that `.foo` is a
+ string, making the `upcase` invocation infallible.
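+
+ As a sketch of the alternative to aborting, the same program can assign the potential
+ argument error instead of using `!`:
+
+ ```vrl
+ .foo, err = downcase(.foo) # .foo is a string when err is null
+ ```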
+
+ To avoid error handling for argument errors, you can specify the types of your fields at the top
+ of your VRL script:
+
+ ```vrl
+ .foo = string!(.foo) # string
+
+ .foo = downcase(.foo) # string
+ ```
+
+ This is generally good practice, providing the ability to opt into type safety as you see fit.
+ Because VRL programs are written once and evaluated many times, the upfront cost of type safety
+ pays for itself in reliable production execution.
+ """
+ }
+ }
+ }
+ stateless: {
+ anchor: "stateless"
+ name: "stateless"
+ title: "Stateless"
+ description: """
+ VRL programs are stateless, operating on a single event at a time. This makes VRL programs simple, fast, and
+ safe. Operations involving state across events, such as [deduplication](https://vector.dev/docs/reference/transforms/dedupe/), are
+ delegated to other Vector transforms designed specifically for stateful operations.
+ """
+ principles: {
+ performance: true
+ safety: true
+ }
+ characteristics: {}
+ }
+ logs_and_metrics: {
+ anchor: "logs_and_metrics"
+ name: "logs_and_metrics"
+ title: "Logs and metrics"
+ description: """
+ VRL works with both [logs](https://vector.dev/docs/about/data-model/log/) and [metrics](https://vector.dev/docs/about/data-model/metric/) within Vector, making it
+ usable for all [Vector events](https://vector.dev/docs/about/under-the-hood/architecture/data-model/).
+ """
+ principles: {
+ performance: false
+ safety: false
+ }
+ characteristics: {}
+ }
+ ergonomic_safety: {
+ anchor: "ergonomic_safety"
+ name: "ergonomic_safety"
+ title: "Ergonomic safety"
+ description: """
+ VRL is ergonomically safe in that it makes it difficult to create slow or buggy VRL programs.
+ While VRL's [compile-time checks](#compilation) prevent runtime errors, they can't prevent
+ some of the more elusive performance and maintainability problems that stem from program complexity—problems
+ that can result in observability pipeline instability and unexpected resource costs. To protect against these
+ more subtle ergonomic problems, VRL is a carefully *limited* language that offers only those features necessary
+ to transform observability data. Any features that are extraneous to that task or likely to result in degraded
+ ergonomics are omitted from the language by design.
+ """
+ principles: {
+ performance: true
+ safety: true
+ }
+ characteristics: {
+ internal_logging_limitation: {
+ anchor: "internal_logging_limitation"
+ name: "internal_logging_limitation"
+ title: "Internal logging limitation"
+ description: "VRL programs do produce internal logs, but not at a rate that can saturate I/O."
+ }
+ io_limitation: {
+ anchor: "io_limitation"
+ name: "io_limitation"
+ title: "I/O limitation"
+ description: """
+ VRL lacks access to system I/O, which tends to be computationally expensive, to require careful
+ caching, and to produce degraded performance.
+ """
+ }
+ recursion_limitation: {
+ anchor: "recursion_limitation"
+ name: "recursion_limitation"
+ title: "Lack of recursion"
+ description: """
+ VRL lacks recursion capabilities, making it impossible to create large or infinite loops that could
+ stall VRL programs or needlessly drain memory.
+ """
+ }
+ no_custom_functions: {
+ anchor: "no_custom_functions"
+ name: "no_custom_functions"
+ title: "Lack of custom functions"
+ description: """
+ VRL requires you to use only its built-in functions and doesn't enable you to create your own. This
+ keeps VRL programs easy to debug and reason about.
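+
+ As a minimal sketch (field names hypothetical), behavior that might otherwise warrant a
+ custom helper is instead composed from built-in functions:
+
+ ```vrl
+ .fingerprint = md5(string!(.message)) # compose built-ins; abort if .message isn't a string
+ ```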
+ """ + } + state_limitation: { + anchor: "state_limitation" + name: "state_limitation" + title: "Lack of state" + description: """ + VRL lacks the ability to hold and maintain state across events. This prevents things like unbounded + memory growth, hard-to-debug production issues, and unexpected program behavior. + """ + } + rate_limited_logging: { + anchor: "rate_limited_logging" + name: "rate_limited_logging" + title: "Rate-limited logging" + description: """ + The VRL `log` function implements rate limiting by default. This ensures that VRL programs invoking the + `log` method don't accidentally saturate I/O. + """ + } + purpose_built: { + anchor: "purpose_built" + name: "purpose_built" + title: "Purpose built for observability" + description: """ + VRL is laser focused on observability use cases and *only* those use cases. This makes many + frustration- and complexity-producing constructs you find in other languages completely superfluous. + Functions like `parse_syslog` and `parse_key_value`, for example, make otherwise complex tasks simple + and prevent the need for complex low-level constructs. + """ + } + } + } + compilation: { + anchor: "compilation" + name: "compilation" + title: "Compilation" + description: """ + VRL programs are compiled to and run as native Rust code. This has several important implications: + + * VRL programs are extremely fast and efficient, with performance characteristics very close to Rust itself + * VRL has no runtime and thus imposes no per-event foreign function interface (FFI) or data conversion costs + * VRL has no garbage collection, which means no GC pauses and no accumulated memory usage across events + """ + principles: { + performance: true + safety: true + } + characteristics: { + fail_safety_checks: { + anchor: "fail_safety_checks" + name: "fail_safety_checks" + title: "Fail safety checks" + description: """ + At compile time, Vector performs [fail safety](#fail_safety) checks to ensure that + all errors thrown by fallible functions are [handled](https://vector.dev/docs/reference/vrl/errors/#handling). If you fail to pass a + string to the `parse_syslog` function, for example, the VRL compiler aborts and provides a helpful error + message. Fail safety means that you need to make explicit decisions about how to handle potentially + malformed data—a superior alternative to being surprised by such issues when Vector is already handling + your data in production. + """ + } + type_safety_checks: { + anchor: "type_safety_checks" + name: "type_safety_checks" + title: "Type safety checks" + description: """ + At compile time, Vector performs [type safety](#type_safety) checks to catch runtime + errors stemming from type mismatches, for example passing an integer to the `parse_syslog` function, + which can only take a string. VRL essentially forces you to write programs around the assumption that + every incoming event could be malformed, which provides a strong bulwark against both human error and + also the many potential consequences of malformed data. + """ + } + } + } + } + functions: { + parse_common_log: { + anchor: "parse_common_log" + name: "parse_common_log" + category: "Parse" + description: "Parses the `value` using the [Common Log Format](https://httpd.apache.org/docs/1.3/logs.html#common)." + notices: [""" + Missing information in the log message may be indicated by `-`. These fields will not be present in the result. + """] + arguments: [{ + name: "value" + description: "The string to parse." 
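+ // Hedged sketch: parse_common_log is fallible, so a real program would handle the
+ // error, e.g. `.http, err = parse_common_log(.message)` or `parse_common_log!(.message)`.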
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "timestamp_format"
+ description: "The [date/time format](https://docs.rs/chrono/latest/chrono/format/strftime/index.html) the log message timestamp is encoded in."
+ required: false
+ multiple: false
+ default: "%d/%b/%Y:%T %z"
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` does not match the Common Log Format", "`timestamp_format` is not a valid format string", "timestamp in `value` fails to parse via the provided `timestamp_format`"]
+ return: {
+ types: ["object"]
+ }
+ examples: [{
+ title: "Parse via Common Log Format (with default timestamp format)"
+ source: "parse_common_log(\"127.0.0.1 bob frank [10/Oct/2000:13:55:36 -0700] \\\"GET /apache_pb.gif HTTP/1.0\\\" 200 2326\")"
+ return: {
+ host: "127.0.0.1"
+ identity: "bob"
+ user: "frank"
+ timestamp: "2000-10-10T20:55:36Z"
+ message: "GET /apache_pb.gif HTTP/1.0"
+ method: "GET"
+ path: "/apache_pb.gif"
+ protocol: "HTTP/1.0"
+ status: 200
+ size: 2326
+ }
+ }, {
+ title: "Parse via Common Log Format (with custom timestamp format)"
+ source: """
+ parse_common_log(
+ \t"127.0.0.1 bob frank [2000-10-10T20:55:36Z] \\"GET /apache_pb.gif HTTP/1.0\\" 200 2326",
+ \t"%+"
+ )
+ """
+ return: {
+ host: "127.0.0.1"
+ identity: "bob"
+ user: "frank"
+ timestamp: "2000-10-10T20:55:36Z"
+ message: "GET /apache_pb.gif HTTP/1.0"
+ method: "GET"
+ path: "/apache_pb.gif"
+ protocol: "HTTP/1.0"
+ status: 200
+ size: 2326
+ }
+ }]
+ }
+ strip_whitespace: {
+ anchor: "strip_whitespace"
+ name: "strip_whitespace"
+ category: "String"
+ description: """
+ Strips whitespace from the start and end of the `value`.
+
+ Whitespace is as defined by the [Unicode `White_Space` property](https://en.wikipedia.org/wiki/Unicode_character_property#Whitespace).
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string to trim."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Strip whitespace"
+ source: "strip_whitespace(\" A sentence. \")"
+ return: "A sentence."
+ }]
+ }
+ slice: {
+ anchor: "slice"
+ name: "slice"
+ category: "String"
+ description: """
+ Returns a slice of the `value` between the `start` and `end` positions.
+
+ If the `start` and `end` parameters are negative, they refer to positions counting from the right of the
+ string or array. If `end` refers to a position that is greater than the length of the string or array,
+ a slice up to the end of the string or array is returned.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string or array to slice."
+ required: true
+ multiple: false
+ type: ["array", "string"]
+ }, {
+ name: "start"
+ description: "The inclusive start position. A zero-based index that can be negative."
+ required: true
+ multiple: false
+ type: ["integer"]
+ }, {
+ name: "end"
+ description: "The exclusive end position. A zero-based index that can be negative."
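+ // Hedged sketch: negative indexes count from the right, so slice("hello", start: -3)
+ // would return "llo".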
+ required: false
+ multiple: false
+ default: "String length"
+ type: ["integer"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Slice a string (positive index)"
+ source: "slice(\"Supercalifragilisticexpialidocious\", start: 5, end: 13)"
+ return: "califrag"
+ }, {
+ title: "Slice a string (negative index)"
+ source: "slice(\"Supercalifragilisticexpialidocious\", start: 5, end: -14)"
+ return: "califragilistic"
+ }]
+ }
+ round: {
+ anchor: "round"
+ name: "round"
+ category: "Number"
+ description: "Rounds the `value` to the specified `precision`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The number to round."
+ required: true
+ multiple: false
+ type: ["integer", "float"]
+ }, {
+ name: "precision"
+ description: "The number of decimal places to round to."
+ required: false
+ multiple: false
+ default: 0
+ type: ["integer"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["integer", "float"]
+ rules: ["If `precision` is `0`, then an integer is returned, otherwise a float is returned."]
+ }
+ examples: [{
+ title: "Round a number (without precision)"
+ source: "round(4.345)"
+ return: 4
+ }, {
+ title: "Round a number (with precision)"
+ source: "round(4.345, precision: 2)"
+ return: 4.35
+ }]
+ }
+ int: {
+ anchor: "int"
+ name: "int"
+ category: "Type"
+ description: """
+ Errors if `value` is not an integer; if `value` is an integer, it is returned.
+
+ This allows the type checker to guarantee that the returned value is an integer and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is an integer."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not an integer."]
+ return: {
+ types: ["integer"]
+ }
+ examples: [{
+ title: "Declare an integer type"
+ input: {
+ log: {
+ value: 42
+ }
+ }
+ source: "int(.value)"
+ return: 42
+ }]
+ }
+ contains: {
+ anchor: "contains"
+ name: "contains"
+ category: "String"
+ description: "Determines if the `value` contains a `substring`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The text to search."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "substring"
+ description: "The substring to search for in `value`."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "case_sensitive"
+ description: "Should the match be case sensitive?"
+ required: false
+ multiple: false
+ type: ["boolean"]
+ default: true
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["boolean"]
+ }
+ examples: [{
+ title: "String contains (case sensitive)"
+ source: "contains(\"The Needle In The Haystack\", \"Needle\")"
+ return: true
+ }, {
+ title: "String contains (case insensitive)"
+ source: "contains(\"The Needle In The Haystack\", \"needle\", case_sensitive: false)"
+ return: true
+ }]
+ }
+ md5: {
+ anchor: "md5"
+ name: "md5"
+ category: "Hash"
+ description: "Calculates an MD5 hash of the `value`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string to calculate the hash for."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Create MD5 hash"
+ source: "md5(\"foo\")"
+ return: "acbd18db4cc2f85cedef654fccc4a4d8"
+ }]
+ }
+ encode_json: {
+ anchor: "encode_json"
+ name: "encode_json"
+ category: "Codec"
+ description: "Encodes the `value` to JSON."
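+ // Hedged sketch: encode_json and parse_json are inverses, e.g.
+ // `parse_json!(encode_json({"a": 1}))` yields the original object.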
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to return a JSON representation of."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Encode to JSON"
+ source: ".payload = encode_json({\"hello\": \"world\"})"
+ return: "{\"hello\": \"world\"}"
+ }]
+ }
+ to_bool: {
+ anchor: "to_bool"
+ name: "to_bool"
+ category: "Coerce"
+ description: "Coerces the `value` into a boolean."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to convert to a Boolean."
+ required: true
+ multiple: false
+ type: ["boolean", "integer", "float", "null", "string"]
+ }]
+ internal_failure_reasons: ["`value` is not a supported boolean representation"]
+ return: {
+ types: ["boolean"]
+ rules: [#"If `value` is `"true"`, `"t"`, `"yes"`, `"y"` then `true` is returned."#, #"If `value` is `"false"`, `"f"`, `"no"`, `"n"`, `"0"` then `false` is returned."#, #"If `value` is `0.0` then `false` is returned, otherwise `true` is returned."#, #"If `value` is `0` then `false` is returned, otherwise `true` is returned."#, #"If `value` is `null` then `false` is returned."#, #"If `value` is a boolean then it is passed through."#]
+ }
+ examples: [{
+ title: "Coerce to a boolean (string)"
+ source: "to_bool(\"yes\")"
+ return: true
+ }, {
+ title: "Coerce to a boolean (float)"
+ source: "to_bool(0.0)"
+ return: false
+ }, {
+ title: "Coerce to a boolean (int)"
+ source: "to_bool(0)"
+ return: false
+ }, {
+ title: "Coerce to a boolean (null)"
+ source: "to_bool(null)"
+ return: false
+ }, {
+ title: "Coerce to a boolean (boolean)"
+ source: "to_bool(true)"
+ return: true
+ }]
+ }
+ floor: {
+ anchor: "floor"
+ name: "floor"
+ category: "Number"
+ description: "Rounds the `value` down to the specified `precision`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The number to round down."
+ required: true
+ multiple: false
+ type: ["integer", "float"]
+ }, {
+ name: "precision"
+ description: "The number of decimal places to round to."
+ required: false
+ multiple: false
+ default: 0
+ type: ["integer"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["integer", "float"]
+ rules: ["If `precision` is `0`, then an integer is returned, otherwise a float is returned."]
+ }
+ examples: [{
+ title: "Round a number down (without precision)"
+ source: "floor(4.345)"
+ return: 4
+ }, {
+ title: "Round a number down (with precision)"
+ source: "floor(4.345, precision: 2)"
+ return: 4.34
+ }]
+ }
+ parse_syslog: {
+ anchor: "parse_syslog"
+ name: "parse_syslog"
+ category: "Parse"
+ description: "Parses the `value` in [Syslog](https://en.wikipedia.org/wiki/Syslog) format."
+ notices: ["""
+ The function makes a best effort to parse the various Syslog formats out in the wild. This includes
+ [RFC 6587](https://tools.ietf.org/html/rfc6587), [RFC 5424](https://tools.ietf.org/html/rfc5424), [RFC 3164](https://tools.ietf.org/html/rfc3164), and other
+ common variations (such as the Nginx Syslog style).
+ """, """
+ All structured data values are returned as strings; we recommend manually coercing values as you see fit.
+ """]
+ arguments: [{
+ name: "value"
+ description: "The text containing the syslog message to parse."
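+ // Hedged sketch: parse_syslog is fallible, e.g. `.parsed, err = parse_syslog(.message)`
+ // leaves err non-null when .message isn't a recognized Syslog format.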
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` is not a properly formatted Syslog log"]
+ return: {
+ types: ["object"]
+ }
+ examples: [{
+ title: "Parse Syslog log (5424)"
+ source: """
+ parse_syslog(
+ \t"<13>1 2020-03-13T20:45:38.119Z dynamicwireless.name non 2426 ID931 [exampleSDID@32473 iut="3" eventSource= "Application" eventID="1011"] Try to override the THX port, maybe it will reboot the neural interface!"
+ )
+ """
+ return: {
+ severity: "notice"
+ facility: "user"
+ timestamp: "2020-03-13T20:45:38.119Z"
+ hostname: "dynamicwireless.name"
+ appname: "non"
+ procid: "2426"
+ msgid: "ID931"
+ iut: "3"
+ eventSource: "Application"
+ eventID: "1011"
+ message: "Try to override the THX port, maybe it will reboot the neural interface!"
+ }
+ }]
+ }
+ get_env_var: {
+ anchor: "get_env_var"
+ name: "get_env_var"
+ category: "System"
+ description: "Gets the value of the environment variable specified by `name`."
+ notices: []
+ arguments: [{
+ name: "name"
+ description: "Name of the environment variable."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["Environment variable `name` does not exist", "Value of environment variable `name` is not valid unicode"]
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Get environment variable"
+ source: "get_env_var(\"HOME\")"
+ return: "/root"
+ }]
+ }
+ ip_subnet: {
+ anchor: "ip_subnet"
+ name: "ip_subnet"
+ category: "IP"
+ description: "Extracts the subnet address from the `ip` using the supplied `subnet`."
+ notices: ["""
+ Works with both IPv4 and IPv6 addresses. The IP version for the mask must be the same as the supplied address.
+ """]
+ arguments: [{
+ name: "ip"
+ description: "The IP address, either a v4 or a v6 address."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "subnet"
+ description: """
+ The subnet to extract from the IP address. This can be either in the form of a prefix length,
+ e.g. `/8`, or as a net mask, e.g. `255.255.0.0`. The net mask can be either an IPv4 or IPv6 address.
+ """
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`ip` is not a valid IP address", "`subnet` is not a valid subnet."]
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "IPv4 subnet"
+ source: "ip_subnet(\"192.168.10.32\", \"255.255.255.0\")"
+ return: "192.168.10.0"
+ }, {
+ title: "IPv6 subnet"
+ source: "ip_subnet(\"2404:6800:4003:c02::64\", \"/32\")"
+ return: "2404:6800::"
+ }]
+ }
+ parse_aws_alb_log: {
+ anchor: "parse_aws_alb_log"
+ name: "parse_aws_alb_log"
+ category: "Parse"
+ description: "Parses `value` in the [Elastic Load Balancer Access format](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html#access-log-entry-examples)."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "Access log of the Application Load Balancer."
+ required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`value` is not a properly formatted AWS ALB log"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse AWS ALB log" + source: """ + parse_aws_alb_log( + \t"http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 200 200 34 366 \\"GET http://www.example.com:80/ HTTP/1.1\\" \\"curl/7.46.0\\" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 \\"Root=1-58337364-23a8c76965a2ef7629b185e3\\" \\"-\\" \\"-\\" 0 2018-11-30T22:22:48.364000Z \\"forward\\" \\"-\\" \\"-\\" \\"-\\" \\"-\\" \\"-\\" \\"-\\"" + ) + """ + return: { + type: "http" + timestamp: "2018-11-30T22:23:00.186641Z" + elb: "app/my-loadbalancer/50dc6c495c0c9188" + client_host: "192.168.131.39:2817" + target_host: null + request_processing_time: 0.0 + target_processing_time: 0.001 + response_processing_time: 0.0 + elb_status_code: "200" + target_status_code: "200" + received_bytes: 34 + sent_bytes: 366 + request_method: "GET" + request_url: "http://www.example.com:80/" + request_protocol: "HTTP/1.1" + user_agent: "curl/7.46.0" + ssl_cipher: null + ssl_protocol: null + target_group_arn: "arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067" + trace_id: "Root=1-58337364-23a8c76965a2ef7629b185e3" + domain_name: null + chosen_cert_arn: null + matched_rule_priority: "0" + request_creation_time: "2018-11-30T22:22:48.364000Z" + actions_executed: "forward" + redirect_url: null + error_reason: null + target_port_list: [] + target_status_code_list: [] + classification: null + classification_reason: null + } + }] + } + to_unix_timestamp: { + anchor: "to_unix_timestamp" + name: "to_unix_timestamp" + category: "Coerce" + description: """ + Coerces the `value` into a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time). + + By default, the number of seconds since the Unix epoch is returned, but milliseconds or nanoseconds can be + returned via the `unit` argument. + """ + notices: [] + arguments: [{ + name: "value" + description: "The timestamp to convert to Unix." + required: true + multiple: false + type: ["timestamp"] + }, { + name: "unit" + description: "The time unit" + type: ["string"] + required: false + multiple: false + enum: { + seconds: "Express Unix time in seconds" + milliseconds: "Express Unix time in milliseconds" + nanoseconds: "Express Unix time in nanoseconds" + } + default: "seconds" + }] + internal_failure_reasons: [] + return: { + types: ["integer"] + } + examples: [{ + title: "Convert to a Unix timestamp (seconds)" + source: "to_unix_timestamp(to_timestamp(\"2021-01-01T00:00:00+00:00\"))" + return: 1609459200 + }, { + title: "Convert to a Unix timestamp (milliseconds)" + source: "to_unix_timestamp(to_timestamp(\"2021-01-01T00:00:00+00:00\"), unit: \"milliseconds\")" + return: 1609459200000 + }, { + title: "Convert to a Unix timestamp (nanoseconds)" + source: "to_unix_timestamp(to_timestamp(\"2021-01-01T00:00:00+00:00\"), unit: \"nanoseconds\")" + return: 1609459200000000000 + }] + } + encode_base64: { + anchor: "encode_base64" + name: "encode_base64" + category: "Codec" + description: "Encodes the `value` to [Base64](https://en.wikipedia.org/wiki/Base64)." + notices: [] + arguments: [{ + name: "value" + description: "The string to encode." 
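+ // Hedged sketch: decode_base64 (documented below) reverses this encoding, e.g.
+ // `decode_base64!(encode_base64("vector"))` returns "vector".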
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "padding"
+ description: "Whether the Base64 output is [padded](https://en.wikipedia.org/wiki/Base64#Output_padding)."
+ required: false
+ multiple: false
+ type: ["boolean"]
+ default: true
+ }, {
+ name: "charset"
+ description: "The character set to use when encoding the data."
+ required: false
+ multiple: false
+ type: ["string"]
+ default: "standard"
+ enum: {
+ standard: "[Standard](https://tools.ietf.org/html/rfc4648#section-4) Base64 format."
+ url_safe: "Modified Base64 for [URL variants](https://en.wikipedia.org/wiki/Base64#URL_applications)."
+ }
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Encode to Base64 (default)"
+ source: "encode_base64(\"please encode me\")"
+ return: "cGxlYXNlIGVuY29kZSBtZQ=="
+ }, {
+ title: "Encode to Base64 (without padding)"
+ source: "encode_base64(\"please encode me, no padding though\", padding: false)"
+ return: "cGxlYXNlIGVuY29kZSBtZSwgbm8gcGFkZGluZyB0aG91Z2g"
+ }, {
+ title: "Encode to Base64 (URL safe)"
+ source: "encode_base64(\"please encode me, but safe for URLs\", charset: \"url_safe\")"
+ return: "cGxlYXNlIGVuY29kZSBtZSwgYnV0IHNhZmUgZm9yIFVSTHM="
+ }]
+ }
+ timestamp: {
+ anchor: "timestamp"
+ name: "timestamp"
+ category: "Type"
+ description: """
+ Errors if `value` is not a timestamp; if `value` is a timestamp, it is returned.
+
+ This allows the type checker to guarantee that the returned value is a timestamp and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is a timestamp."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not a timestamp."]
+ return: {
+ types: ["timestamp"]
+ rules: [#"If `value` is a timestamp then it is returned."#, #"Otherwise an error is raised."#]
+ }
+ examples: [{
+ title: "Declare a timestamp type"
+ input: {
+ log: {
+ timestamp: "2020-10-10T16:00:00Z"
+ }
+ }
+ source: "timestamp(.timestamp)"
+ return: "2020-10-10T16:00:00Z"
+ }]
+ }
+ sha3: {
+ anchor: "sha3"
+ name: "sha3"
+ category: "Hash"
+ description: "Calculates a [SHA-3](https://en.wikipedia.org/wiki/SHA-3) hash of the `value`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string to calculate the hash for."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "variant"
+ description: "The variant of the algorithm to use."
+ enum: {
+ "SHA3-224": "SHA3-224 algorithm"
+ "SHA3-256": "SHA3-256 algorithm"
+ "SHA3-384": "SHA3-384 algorithm"
+ "SHA3-512": "SHA3-512 algorithm"
+ }
+ required: false
+ multiple: false
+ default: "SHA3-512"
+ type: ["string"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Calculate SHA-3 hash"
+ source: "sha3(\"foo\", variant: \"SHA3-224\")"
+ return: "f4f6779e153c391bbd29c95e72b0708e39d9166c7cea51d1f10ef58a"
+ }]
+ }
+ parse_regex: {
+ anchor: "parse_regex"
+ name: "parse_regex"
+ category: "Parse"
+ description: """
+ Parses the `value` via the provided [Regex](https://en.wikipedia.org/wiki/Regular_expression) `pattern`.
+
+ This function differs from the `parse_regex_all` function in that it returns the first match only.
+ """
+ notices: ["""
+ VRL aims to provide purpose-specific [parsing functions](https://vector.dev/docs/reference/vrl/functions/#parse-functions) for common log formats.
+ Before reaching for the `parse_regex` function, see if a Remap [`parse_*` function](https://vector.dev/docs/reference/vrl/functions/#parse-functions)
+ already exists for your format. If not, please consider [opening an issue](https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature) to
+ request support.
+ """, """
+ All values are returned as strings; we recommend manually coercing values as you see fit.
+ """]
+ arguments: [{
+ name: "value"
+ description: "The string to search."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "pattern"
+ description: "The regular expression pattern to search against."
+ required: true
+ multiple: false
+ type: ["regex"]
+ }]
+ internal_failure_reasons: ["`value` fails to parse via the provided `pattern`"]
+ return: {
+ types: ["object"]
+ rules: ["Matches will return the capture groups corresponding to the leftmost matches in the text.", "If no match is found an error is raised."]
+ }
+ examples: [{
+ title: "Parse via Regex (with named capture groups)"
+ source: "parse_regex(\"first group and second group.\", r'(?P<number>.*?) group')"
+ return: {
+ number: "first"
+ "0": "first group"
+ "1": "first"
+ }
+ }, {
+ title: "Parse via Regex (without named capture groups)"
+ source: "parse_regex(\"first group and second group.\", r'(.*?) group')"
+ return: {
+ "1": "first"
+ }
+ }]
+ }
+ bool: {
+ anchor: "bool"
+ name: "bool"
+ category: "Type"
+ description: """
+ Errors if `value` is not a boolean; if `value` is a boolean, it is returned.
+
+ This allows the type checker to guarantee that the returned value is a boolean and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is a boolean."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not a boolean."]
+ return: {
+ types: ["boolean"]
+ rules: [#"If `value` is a boolean then it is returned."#, #"Otherwise an error is raised."#]
+ }
+ examples: [{
+ title: "Declare a boolean type"
+ input: {
+ log: {
+ value: false
+ }
+ }
+ source: "bool(.value)"
+ return: false
+ }]
+ }
+ upcase: {
+ anchor: "upcase"
+ name: "upcase"
+ description: """
+ Upcases the `value`.
+
+ "Upcase" is defined according to the terms of the Unicode Derived Core Property Uppercase.
+ """
+ arguments: [{
+ name: "value"
+ description: "The string to convert to uppercase."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ category: "String"
+ notices: []
+ examples: [{
+ title: "Upcase a string"
+ source: "upcase(\"Hello, World!\")"
+ return: "HELLO, WORLD!"
+ }]
+ }
+ parse_json: {
+ anchor: "parse_json"
+ name: "parse_json"
+ category: "Parse"
+ description: "Parses the `value` as JSON."
+ notices: ["""
+ Only JSON types are returned. If you need to convert a `string` into a `timestamp`, consider the
+ `parse_timestamp` function.
+ """]
+ arguments: [{
+ name: "value"
+ description: "The string representation of the JSON to parse."
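+ // Hedged sketch: a common pattern is falling back to an empty object when parsing
+ // fails, e.g. `.structured = parse_json(.message) ?? {}`.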
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` is not a valid JSON formatted payload"]
+ return: {
+ types: ["boolean", "integer", "float", "string", "object", "array", "null"]
+ }
+ examples: [{
+ title: "Parse JSON"
+ source: "parse_json(\"{\\\"key\\\": \\\"val\\\"}\")"
+ return: {
+ key: "val"
+ }
+ }]
+ }
+ is_nullish: {
+ anchor: "is_nullish"
+ name: "is_nullish"
+ category: "Type"
+ description: """
+ Determines whether the `value` is "nullish".
+
+ Nullish indicates the absence of a meaningful value.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to check for \"nullishness,\" i.e. a useless value."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["boolean"]
+ rules: [#"If `value` is `null`, then `true` is returned."#, #"If `value` is `"-"`, then `true` is returned."#, "If `value` is whitespace, as defined by [Unicode `White_Space` property](https://en.wikipedia.org/wiki/Unicode_character_property#Whitespace), then `true` is returned."]
+ }
+ examples: [{
+ title: "Null detection (blank string)"
+ source: "is_nullish(\"\")"
+ return: true
+ }, {
+ title: "Null detection (dash string)"
+ source: "is_nullish(\"-\")"
+ return: true
+ }, {
+ title: "Null detection (whitespace)"
+ source: """
+ is_nullish("
+
+ ")
+ """
+ return: true
+ }]
+ }
+ ip_to_ipv6: {
+ anchor: "ip_to_ipv6"
+ name: "ip_to_ipv6"
+ category: "IP"
+ description: "Converts the `ip` to an IPv6 address."
+ notices: []
+ arguments: [{
+ name: "ip"
+ description: "The IP address to convert to IPv6."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`ip` is not a valid IP address"]
+ return: {
+ types: ["string"]
+ rules: ["If `ip` is already an IPv6 address it is passed through untouched.", "If `ip` is an IPv4 address, it is converted to an IPv4-mapped IPv6 address."]
+ }
+ examples: [{
+ title: "IPv4 to IPv6"
+ source: "ip_to_ipv6(\"192.168.10.32\")"
+ return: "::ffff:192.168.10.32"
+ }]
+ }
+ match: {
+ anchor: "match"
+ name: "match"
+ category: "String"
+ description: "Determines if the `value` matches the `pattern`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to match."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "pattern"
+ description: "The regular expression pattern to match against."
+ required: true
+ multiple: false
+ type: ["regex"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["boolean"]
+ }
+ examples: [{
+ title: "Regex match on a string"
+ source: "match(\"I'm a little teapot\", r'teapot')"
+ return: true
+ }]
+ }
+ starts_with: {
+ anchor: "starts_with"
+ name: "starts_with"
+ category: "String"
+ description: "Determines if the `value` begins with the `substring`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string to search."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "substring"
+ description: "The substring `value` must start with."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "case_sensitive"
+ description: "Should the match be case sensitive?"
+ required: false
+ multiple: false
+ type: ["boolean"]
+ default: true
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["boolean"]
+ }
+ examples: [{
+ title: "String starts with (case sensitive)"
+ source: "starts_with(\"The Needle In The Haystack\", \"The Needle\")"
+ return: true
+ }, {
+ title: "String starts with (case insensitive)"
+ source: "starts_with(\"The Needle In The Haystack\", \"the needle\", case_sensitive: false)"
+ return: true
+ }]
+ }
+ string: {
+ anchor: "string"
+ name: "string"
+ category: "Type"
+ description: """
+ Errors if `value` is not a string; if `value` is a string, it is returned.
+
+ This allows the type checker to guarantee that the returned value is a string and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is a string."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not a string."]
+ return: {
+ types: ["string"]
+ rules: [#"If `value` is a string then it is returned."#, #"Otherwise an error is raised."#]
+ }
+ examples: [{
+ title: "Declare a string type"
+ input: {
+ log: {
+ message: '{"field": "value"}'
+ }
+ }
+ source: "string(.message)"
+ return: '{"field": "value"}'
+ }]
+ }
+ merge: {
+ anchor: "merge"
+ name: "merge"
+ category: "Object"
+ description: "Merges the `from` object into the `to` object."
+ notices: []
+ arguments: [{
+ name: "to"
+ description: "The object to merge into."
+ required: true
+ multiple: false
+ type: ["object"]
+ }, {
+ name: "from"
+ description: "The object to merge from."
+ required: true
+ multiple: false
+ type: ["object"]
+ }, {
+ name: "deep"
+ description: "If true a deep merge is performed, otherwise only top level fields are merged."
+ required: false
+ multiple: false
+ default: false
+ type: ["boolean"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["object"]
+ rules: [#"If a key exists in both objects, the field from the `from` object is chosen."#, #"If `deep` is specified, and a key exists in both objects, and both these fields are also objects, then those objects will merge recursively as well."#]
+ }
+ examples: [{
+ title: "Object merge (shallow)"
+ source: """
+ merge(
+ \t{
+ \t\t"parent1": {
+ \t\t\t"child1": 1,
+ \t\t\t"child2": 2
+ \t\t},
+ \t\t"parent2": {
+ \t\t\t"child3": 3
+ \t\t}
+ \t},
+ \t{
+ \t\t"parent1": {
+ \t\t\t"child2": 4,
+ \t\t\t"child5": 5
+ \t\t}
+ \t}
+ )
+ """
+ return: {
+ parent1: {
+ child2: 4
+ child5: 5
+ }
+ parent2: {
+ child3: 3
+ }
+ }
+ }, {
+ title: "Object merge (deep)"
+ source: """
+ merge(
+ \t{
+ \t\t"parent1": {
+ \t\t\t"child1": 1,
+ \t\t\t"child2": 2
+ \t\t},
+ \t\t"parent2": {
+ \t\t\t"child3": 3
+ \t\t}
+ \t},
+ \t{
+ \t\t"parent1": {
+ \t\t\t"child2": 4,
+ \t\t\t"child5": 5
+ \t\t}
+ \t},
+ \tdeep: true
+ )
+ """
+ return: {
+ parent1: {
+ child1: 1
+ child2: 4
+ child5: 5
+ }
+ parent2: {
+ child3: 3
+ }
+ }
+ }]
+ }
+ decode_base64: {
+ anchor: "decode_base64"
+ name: "decode_base64"
+ category: "Codec"
+ description: "Decodes the `value` (a [Base64](https://en.wikipedia.org/wiki/Base64) string) into its original string."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The [Base64](https://en.wikipedia.org/wiki/Base64) data to decode."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "charset"
+ description: "The character set to use when decoding the data."
+ required: false
+ multiple: false
+ type: ["string"]
+ default: "standard"
+ enum: {
+ standard: "[Standard](https://tools.ietf.org/html/rfc4648#section-4) Base64 format."
+ url_safe: "Modified Base64 for [URL variants](https://en.wikipedia.org/wiki/Base64#URL_applications)."
+ }
+ }]
+ internal_failure_reasons: ["`value` isn't a valid encoded Base64 string."]
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Decode Base64 data (default)"
+ source: "decode_base64(\"eW91IGhhdmUgc3VjY2Vzc2Z1bGx5IGRlY29kZWQgbWU=\")"
+ return: "you have successfully decoded me"
+ }, {
+ title: "Decode Base64 data (URL safe)"
+ source: "decode_base64(\"eW91IGNhbid0IG1ha2UgeW91ciBoZWFydCBmZWVsIHNvbWV0aGluZyBpdCB3b24ndA==\", charset: \"url_safe\")"
+ return: "you can't make your heart feel something it won't"
+ }]
+ }
+ float: {
+ anchor: "float"
+ name: "float"
+ category: "Type"
+ description: """
+ Errors if `value` is not a float; if `value` is a float, it is returned.
+
+ This allows the type checker to guarantee that the returned value is a float and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is a float."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not a float."]
+ return: {
+ types: ["float"]
+ rules: [#"If `value` is a float then it is returned."#, #"Otherwise an error is raised."#]
+ }
+ examples: [{
+ title: "Declare a float type"
+ input: {
+ log: {
+ value: 42.0
+ }
+ }
+ source: "float(.value)"
+ return: 42.0
+ }]
+ }
+ array: {
+ anchor: "array"
+ name: "array"
+ category: "Type"
+ description: """
+ Errors if `value` is not an array; if `value` is an array, it is returned.
+
+ This allows the type checker to guarantee that the returned value is an array and can be used in any function
+ that expects this type.
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to ensure is an array."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: ["`value` is not an array."]
+ return: {
+ types: ["array"]
+ }
+ examples: [{
+ title: "Declare an array type"
+ input: {
+ log: {
+ value: [1, 2, 3]
+ }
+ }
+ source: "array(.value)"
+ return: [1, 2, 3]
+ }]
+ }
+ log: {
+ anchor: "log"
+ name: "log"
+ category: "Debug"
+ description: "Logs the `value` to Vector's [stdout](https://en.wikipedia.org/wiki/Standard_streams#Standard_output_(stdout)) at the specified `level`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to log."
+ required: true
+ multiple: false
+ type: ["any"]
+ }, {
+ name: "level"
+ description: "The log level."
+ required: false
+ multiple: false
+ type: ["string"]
+ enum: {
+ trace: "Log at the `trace` level."
+ debug: "Log at the `debug` level."
+ info: "Log at the `info` level."
+ warn: "Log at the `warn` level."
+ error: "Log at the `error` level."
+ }
+ default: "info"
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["null"]
+ }
+ examples: [{
+ title: "Log a message"
+ source: "log(\"Hello, World!\", level: \"info\")"
+ return: null
+ }, {
+ title: "Log an error"
+ source: """
+ ts, err = format_timestamp(to_timestamp("10-Oct-2020 1"))
+ if err != null {
+ \tlog(err, level: "error")
+ }
+ """
+ return: null
+ }]
+ }
+ del: {
+ anchor: "del"
+ name: "del"
+ category: "Event"
+ description: "Removes the field specified by the `path` from the current event object."
+ arguments: [{
+ name: "path"
+ description: "The path of the field to delete."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: []
+ notices: ["""
+ The `del` function _modifies the current event in-place_ and returns the value of the deleted field.
+ """]
+ return: {
+ types: ["any"]
+ rules: ["The return is the value of the field being deleted. If the field does not exist, `null` is returned."]
+ }
+ examples: [{
+ title: "Delete a field"
+ input: {
+ log: {
+ field1: 1
+ field2: 2
+ }
+ }
+ source: "del(.field1)"
+ output: {
+ log: {
+ field2: 2
+ }
+ }
+ }, {
+ title: "Rename a field"
+ input: {
+ log: {
+ old_field: "please rename me"
+ }
+ }
+ source: ".new_field = del(.old_field)"
+ output: {
+ log: {
+ new_field: "please rename me"
+ }
+ }
+ }]
+ }
+ to_syslog_severity: {
+ anchor: "to_syslog_severity"
+ name: "to_syslog_severity"
+ category: "Coerce"
+ description: """
+ Coerces the `value`, a Syslog [log level keyword](https://en.wikipedia.org/wiki/Syslog#Severity_level), into a Syslog integer
+ severity level (`0` to `7`).
+ """
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The Syslog level keyword to convert."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` is not a valid Syslog level keyword"]
+ return: {
+ types: ["integer"]
+ rules: ["The now-deprecated keywords `panic`, `error`, and `warn` are converted to `0`, `3`, and `4` respectively."]
+ }
+ examples: [{
+ title: "Coerce to Syslog severity"
+ source: "to_syslog_severity(\"alert\")"
+ return: 1
+ }]
+ }
+ append: {
+ anchor: "append"
+ name: "append"
+ category: "Array"
+ description: "Appends the `items` to the end of the `value`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The array."
+ required: true
+ multiple: false
+ type: ["array"]
+ }, {
+ name: "items"
+ description: "The items to append."
+ required: true
+ multiple: false
+ type: ["array"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["array"]
+ }
+ examples: [{
+ title: "Append to an array"
+ source: "append([1, 2], [3, 4])"
+ return: [1, 2, 3, 4]
+ }]
+ }
+ parse_url: {
+ anchor: "parse_url"
+ name: "parse_url"
+ category: "Parse"
+ description: "Parses the `value` in [URL](https://en.wikipedia.org/wiki/URL) format."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The text of the url."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` is not a properly formatted URL"]
+ return: {
+ types: ["object"]
+ }
+ examples: [{
+ title: "Parse URL"
+ source: "parse_url(\"ftp://foo:bar@vector.dev:4343/foobar?hello=world#123\")"
+ return: {
+ scheme: "ftp"
+ username: "foo"
+ password: "bar"
+ host: "vector.dev"
+ port: 4343
+ path: "/foobar"
+ query: {
+ hello: "world"
+ }
+ fragment: "123"
+ }
+ }]
+ }
+ to_int: {
+ anchor: "to_int"
+ name: "to_int"
+ category: "Coerce"
+ description: "Coerces the `value` into an integer."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The value to convert to an integer."
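+ // Hedged sketch per the rules below: to_int(true) returns 1, to_int("2") returns 2,
+ // and a non-numeric string raises an error that must be handled.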
+ required: true
+ multiple: false
+ type: ["integer", "float", "boolean", "string", "timestamp"]
+ }]
+ internal_failure_reasons: ["`value` is not a supported integer representation"]
+ return: {
+ types: ["integer"]
+ rules: ["If `value` is a string, it must be the string representation of an integer or else an error is raised.", "If `value` is a boolean, `0` will be returned for `false` and `1` will be returned for `true`.", "If `value` is a timestamp, a [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) (in seconds) is returned."]
+ }
+ examples: [{
+ title: "Coerce to an int (string)"
+ source: "to_int(\"2\")"
+ return: 2
+ }, {
+ title: "Coerce to an int (timestamp)"
+ source: "to_int(to_timestamp(\"2020-12-30 22:20:53.824727 UTC\"))"
+ return: 1609366853
+ }]
+ }
+ flatten: {
+ anchor: "flatten"
+ name: "flatten"
+ category: "Enumerate"
+ description: "Flattens the `value` into a single-level representation."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The array or object to flatten."
+ required: true
+ multiple: false
+ type: ["array", "object"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["array", "object"]
+ rules: ["The return type will match the `value` type."]
+ }
+ examples: [{
+ title: "Flatten array"
+ source: "flatten([1, [2, 3, 4], [5, [6, 7], 8], 9])"
+ return: [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ }, {
+ title: "Flatten object"
+ source: """
+ flatten({
+ \t"parent1": {
+ \t\t"child1": 1,
+ \t\t"child2": 2
+ \t},
+ \t"parent2": {
+ \t\t"child3": 3
+ \t}
+ })
+ """
+ return: {
+ "parent1.child1": 1
+ "parent1.child2": 2
+ "parent2.child3": 3
+ }
+ }]
+ }
+ truncate: {
+ anchor: "truncate"
+ name: "truncate"
+ category: "String"
+ description: "Truncates the `value` up to the `limit`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The string to truncate."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "limit"
+ description: "The number of characters to truncate the string after."
+ required: true
+ multiple: false
+ type: ["integer", "float"]
+ }, {
+ name: "ellipsis"
+ description: "If true, an ellipsis (...) is appended should the string be truncated."
+ required: true
+ multiple: false
+ type: ["boolean"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ rules: ["If `limit` is larger than the length of the string, the string is returned unchanged.", "If `ellipsis` is `true`, then an ellipsis (...) will be appended to the string (beyond the specified limit)."]
+ }
+ examples: [{
+ title: "Truncate a string"
+ source: "truncate(\"A rather long sentence.\", limit: 11, ellipsis: true)"
+ return: "A rather lo..."
+ }]
+ }
+ replace: {
+ anchor: "replace"
+ name: "replace"
+ category: "String"
+ description: "Replaces all matching instances of `pattern` in the `value`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The original string."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "pattern"
+ description: "Replace all matches of this pattern. Can be a static string or a regular expression."
+ required: true
+ multiple: false
+ type: ["regex", "string"]
+ }, {
+ name: "with"
+ description: "The string that the matches are replaced with."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "count"
+ description: "The maximum number of replacements to perform. -1 means replace all matches."
+ required: false
+ multiple: false
+ default: -1
+ type: ["integer"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Replace literal text"
+ source: "replace(\"Apples and Bananas\", \"and\", \"not\")"
+ return: "Apples not Bananas"
+ }, {
+ title: "Replace via regular expression"
+ source: "replace(\"Apples and Bananas\", r'bananas'i, \"Pineapples\")"
+ return: "Apples and Pineapples"
+ }, {
+ title: "Replace first instance"
+ source: "replace(\"Bananas and Bananas\", \"Bananas\", \"Pineapples\", count: 1)"
+ return: "Pineapples and Bananas"
+ }]
+ }
+ includes: {
+ anchor: "includes"
+ name: "includes"
+ category: "Enumerate"
+ description: "Determines whether the `value` includes the `item`."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "The array."
+ required: true
+ multiple: false
+ type: ["array"]
+ }, {
+ name: "item"
+ description: "The item to check."
+ required: true
+ multiple: false
+ type: ["any"]
+ }]
+ internal_failure_reasons: []
+ return: {
+ types: ["boolean"]
+ }
+ examples: [{
+ title: "Array includes"
+ source: "includes([\"apple\", \"orange\", \"banana\"], \"banana\")"
+ return: true
+ }]
+ }
+ parse_tokens: {
+ anchor: "parse_tokens"
+ name: "parse_tokens"
+ category: "Parse"
+ description: """
+ Parses the `value` in "token" format.
+
+ A token is considered to be:
+
+ * A word surrounded by whitespace.
+ * Text delimited by double quotes: `".."`. Quotes can be included in the token if they are escaped by a backslash (`\\`).
+ * Text delimited by square brackets: `[..]`. Closing square brackets can be included in the token if they are escaped by a backslash (`\\`).
+ """
+ notices: ["""
+ All token values are returned as strings; we recommend manually coercing values as you see fit.
+ """]
+ arguments: [{
+ name: "value"
+ description: "The string to tokenize."
+ required: true
+ multiple: false
+ type: ["string"]
+ }]
+ internal_failure_reasons: ["`value` is not a properly formatted tokenized string"]
+ return: {
+ types: ["array"]
+ }
+ examples: [{
+ title: "Parse tokens"
+ source: """
+ parse_tokens(
+ \t"A sentence \\"with \\\\"a\\\\" sentence inside\\" and [some brackets]"
+ )
+ """
+ return: ["A", "sentence", #"with \"a\" sentence inside"#, "and", "some brackets"]
+ }]
+ }
+ get_hostname: {
+ anchor: "get_hostname"
+ name: "get_hostname"
+ category: "System"
+ description: "Gets the local system's hostname."
+ notices: []
+ arguments: []
+ internal_failure_reasons: []
+ return: {
+ types: ["string"]
+ }
+ examples: [{
+ title: "Get hostname"
+ input: {
+ log: {}
+ }
+ source: ".hostname = get_hostname()"
+ output: {
+ log: {
+ hostname: "localhost.localdomain"
+ }
+ }
+ }]
+ }
+ parse_aws_vpc_flow_log: {
+ anchor: "parse_aws_vpc_flow_log"
+ name: "parse_aws_vpc_flow_log"
+ category: "Parse"
+ description: "Parses `value` in the [VPC Flow Logs format](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html)."
+ notices: []
+ arguments: [{
+ name: "value"
+ description: "VPC Flow Log."
+ required: true
+ multiple: false
+ type: ["string"]
+ }, {
+ name: "format"
+ description: "VPC Flow Log format."
+ required: false + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`value` is not a properly formatted AWS VPC Flow log"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse AWS VPC Flow log (default format)" + source: "parse_aws_vpc_flow_log(\"2 123456789010 eni-1235b8ca123456789 - - - - - - - 1431280876 1431280934 - NODATA\")" + return: { + version: 2 + account_id: 123456789010 + interface_id: "eni-1235b8ca123456789" + srcaddr: null + dstaddr: null + srcport: null + dstport: null + protocol: null + packets: null + bytes: null + start: 1431280876 + end: 1431280934 + action: null + log_status: "NODATA" + } + }, { + title: "Parse AWS VPC Flow log (custom format)" + source: """ + parse_aws_vpc_flow_log( + \t"- eni-1235b8ca123456789 10.0.1.5 10.0.0.220 10.0.1.5 203.0.113.5", + \t"instance_id interface_id srcaddr dstaddr pkt_srcaddr pkt_dstaddr" + ) + """ + return: { + instance_id: null + interface_id: "eni-1235b8ca123456789" + srcaddr: "10.0.1.5" + dstaddr: "10.0.0.220" + pkt_srcaddr: "10.0.1.5" + pkt_dstaddr: "203.0.113.5" + } + }] + } + now: { + anchor: "now" + name: "now" + category: "Timestamp" + description: "Returns the current timestamp in the UTC timezone with nanosecond precision." + notices: [] + arguments: [] + internal_failure_reasons: [] + return: { + types: ["timestamp"] + } + examples: [{ + title: "Generate a current timestamp" + source: "now()" + return: "2020-10-21T20:53:12.212221Z" + }] + } + assert: { + anchor: "assert" + name: "assert" + category: "Debug" + description: """ + Asserts the `condition`. + + If the `condition` evaluates to `false` the program is aborted with the `message`. + """ + notices: [""" + This function is designed to be used in a standalone fashion, aborting the script if it fails. It should + not be used in logical expressions. + """] + arguments: [{ + name: "condition" + description: "The condition to check." + required: true + multiple: false + type: ["boolean"] + }, { + name: "message" + description: "Should condition be false, message will be reported as the failure message." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`condition` evaluates to `false`"] + return: { + types: ["null"] + } + examples: [{ + title: "Assertion (true)" + source: "assert(\"foo\" == \"foo\", message: \"Foo must be foo!\")" + return: null + }, { + title: "Assertion (false)" + source: "assert(\"foo\" == \"bar\", message: \"Foo must be foo!\")" + }] + } + to_string: { + anchor: "to_string" + name: "to_string" + category: "Coerce" + description: "Coerces the `value` into a string." + notices: [] + arguments: [{ + name: "value" + description: "The value to return a string representation of." 
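+ // A minimal sketch of `to_string` across input types (return values are
+ // illustrative; objects and arrays are encoded as JSON, per the rules below):
+ //
+ //   to_string(true)       # "true"
+ //   to_string(3.14)       # "3.14"
+ //   to_string([1, 2, 3])  # "[1,2,3]"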
+ required: true + multiple: false + type: ["any"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + rules: [#"If `value` is an integer then its string representation is returned."#, #"If `value` is a float then its string representation is returned."#, #"If `value` is a boolean then `"true"` or `"false"` is returned."#, #"If `value` is a timestamp then its RFC3339 representation is returned."#, #"If `value` is an object then it is encoded into JSON."#, #"If `value` is a list then it is encoded into JSON."#] + } + examples: [{ + title: "Coerce to a string (boolean)" + source: "to_string(true)" + return: "true" + }, { + title: "Coerce to a string (int)" + source: "to_string(52)" + return: "52" + }, { + title: "Coerce to a string (float)" + source: "to_string(52.2)" + return: "52.2" + }] + } + sha1: { + anchor: "sha1" + name: "sha1" + category: "Hash" + description: "Calculates a [SHA-1](https://en.wikipedia.org/wiki/SHA-1) hash of the `value`." + notices: [] + arguments: [{ + name: "value" + description: "The string to calculate the hash for." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Calculate sha1 hash" + source: "sha1(\"foo\")" + return: "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33" + }] + } + parse_duration: { + anchor: "parse_duration" + name: "parse_duration" + category: "Parse" + description: "Parses the `value` duration, returning a number expressed in the specified `unit`." + notices: [] + arguments: [{ + name: "value" + description: "The string of the duration." + required: true + multiple: false + type: ["string"] + }, { + name: "unit" + description: "The unit the parsed duration should be output as." + required: true + multiple: false + type: ["string"] + enum: { + ns: "Nanoseconds (1 billion nanoseconds in a second)" + us: "Microseconds (1 million microseconds in a second)" + µs: "Microseconds (1 million microseconds in a second)" + ms: "Milliseconds (1 thousand milliseconds in a second)" + cs: "Centiseconds (100 centiseconds in a second)" + ds: "Deciseconds (10 deciseconds in a second)" + s: "Seconds" + m: "Minutes (60 seconds in a minute)" + h: "Hours (60 minutes in an hour)" + d: "Days (24 hours in a day)" + } + }] + internal_failure_reasons: ["`value` is not a properly formatted duration"] + return: { + types: ["float"] + } + examples: [{ + title: "Parse duration (milliseconds)" + source: "parse_duration(\"1005ms\", unit: \"s\")" + return: 1.005 + }] + } + push: { + anchor: "push" + name: "push" + category: "Array" + description: "Adds the `item` to the end of the `value` array." + notices: [] + arguments: [{ + name: "value" + description: "The array to push the item onto." + required: true + multiple: false + type: ["array"] + }, { + name: "item" + description: "The item to push onto the array." + required: true + multiple: false + type: ["any"] + }] + internal_failure_reasons: [] + return: { + types: ["array"] + rules: ["A new array is returned; the `value` is not modified in place."] + } + examples: [{ + title: "Push an item onto an array" + source: "push([1, 2], 3)" + return: [1, 2, 3] + }] + } + exists: { + anchor: "exists" + name: "exists" + category: "Event" + description: "Checks if the `path` exists for the current event." + notices: [] + arguments: [{ + name: "path" + description: "The path of the field to check."
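+ // Conversely (a sketch): querying a path that is absent from the event
+ // returns `false` rather than raising an error:
+ //
+ //   exists(.not_a_field)  # false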
+ required: true + multiple: false + type: ["path"] + }] + internal_failure_reasons: [] + return: { + types: ["boolean"] + } + examples: [{ + title: "Exists (field)" + input: { + log: { + field: 1 + } + } + source: "exists(.field)" + return: true + }, { + title: "Exists (array element)" + input: { + log: { + array: [1, 2, 3] + } + } + source: "exists(.array[2])" + return: true + }] + } + parse_glog: { + anchor: "parse_glog" + name: "parse_glog" + category: "Parse" + description: "Parses the `value` using the [glog (Google Logging Library)](https://github.com/google/glog) format." + notices: [] + arguments: [{ + name: "value" + description: "The string to parse." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`value` does not match the `glog` format"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse via glog" + source: "parse_glog(\"I20210131 14:48:54.411655 15520 main.c++:9] Hello world!\")" + return: { + level: "info" + timestamp: "2021-01-31T14:48:54.411655Z" + id: 15520 + file: "main.c++" + line: 9 + message: "Hello world!" + } + }] + } + compact: { + anchor: "compact" + name: "compact" + category: "Enumerate" + description: """ + Compacts the `value` by removing "empty" values. + + What is considered empty can be specified with the parameters. + """ + notices: [] + arguments: [{ + name: "value" + description: "The object or array to compact." + required: true + multiple: false + type: ["array", "object"] + }, { + name: "recursive" + description: "Should the compaction be recursive." + required: false + multiple: false + default: true + type: ["boolean"] + }, { + name: "null" + description: "Should null be treated as an empty value." + required: false + multiple: false + default: true + type: ["boolean"] + }, { + name: "string" + description: "Should an empty string be treated as an empty value." + required: false + multiple: false + default: true + type: ["boolean"] + }, { + name: "object" + description: "Should an empty object be treated as an empty value." + required: false + multiple: false + default: true + type: ["boolean"] + }, { + name: "array" + description: "Should an empty array be treated as an empty value." + required: false + multiple: false + default: true + type: ["boolean"] + }, { + name: "nullish" + description: "Tests if the value is \"nullish\" as defined by the `is_nullish` function." + required: false + multiple: false + default: false + type: ["boolean"] + }] + internal_failure_reasons: [] + return: { + types: ["array", "object"] + rules: ["The return type will match the `value` type."] + } + examples: [{ + title: "Compact an array" + source: "compact([\"foo\", \"bar\", \"\", null, [], \"buzz\"], string: true, array: true, null: true)" + return: ["foo", "bar", "buzz"] + }, { + title: "Compact an object" + source: "compact({\"field1\": 1, \"field2\": \"\", \"field3\": [], \"field4\": null}, string: true, array: true, null: true)" + return: { + field1: 1 + } + }] + } + format_number: { + anchor: "format_number" + name: "format_number" + category: "Number" + description: "Formats the `value` into a string representation of the number." + notices: [] + arguments: [{ + name: "value" + description: "The number to format as a string." + required: true + multiple: false + type: ["integer", "float"] + }, { + name: "scale" + description: "The number of decimal places to display."
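+ // A sketch of how `scale` and the separators combine, e.g. formatting for a
+ // European-style locale (values are illustrative):
+ //
+ //   format_number(1234567.89, 2, decimal_separator: ",", grouping_separator: ".")
+ //   # "1.234.567,89"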
+ required: false + multiple: false + type: ["integer"] + }, { + name: "decimal_separator" + description: "The character to use between the whole and decimal parts of the number." + required: false + multiple: false + type: ["string"] + default: "." + }, { + name: "grouping_separator" + description: "The character to use between each thousands part of the number." + required: false + multiple: false + type: ["string"] + default: "," + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Format a number (3 decimals)" + source: "format_number(1234567.89, 3, decimal_separator: \".\", grouping_separator: \",\")" + return: "1,234,567.890" + }] + } + parse_timestamp: { + anchor: "parse_timestamp" + name: "parse_timestamp" + category: "Parse" + description: "Parses the `value` in [strptime](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) `format`." + notices: [] + arguments: [{ + name: "value" + description: "The text of the timestamp." + required: true + multiple: false + type: ["string"] + }, { + name: "format" + description: "The [strptime](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers) format." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`value` fails to parse via the provided `format`"] + return: { + types: ["timestamp"] + } + examples: [{ + title: "Parse timestamp" + source: "parse_timestamp(\"10-Oct-2020 16:00\", format: \"%v %R\")" + return: "2020-10-10T16:00:00Z" + }] + } + format_timestamp: { + anchor: "format_timestamp" + name: "format_timestamp" + category: "Timestamp" + description: "Formats the `value` into a string representation of the timestamp." + notices: [] + arguments: [{ + name: "value" + description: "The timestamp to format as text." + required: true + multiple: false + type: ["timestamp"] + }, { + name: "format" + description: "The format string as described by the [Chrono library](https://docs.rs/chrono/latest/chrono/format/strftime/index.html#specifiers)." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Format a timestamp (ISO8601/RFC 3339)" + source: "format_timestamp(now(), format: \"%+\")" + return: "2020-10-21T16:00:00Z" + }, { + title: "Format a timestamp (custom)" + source: "format_timestamp(now(), format: \"%v %R\")" + return: "10-Oct-2020 16:00" + }] + } + parse_regex_all: { + anchor: "parse_regex_all" + name: "parse_regex_all" + category: "Parse" + description: """ + Parses the `value` via the provided [Regex](https://en.wikipedia.org/wiki/Regular_expression) `pattern`. + + This function differs from the `parse_regex` function in that it returns _all_ matches, not just the first. + """ + notices: [""" + VRL aims to provide purpose-specific [parsing functions](https://vector.dev/docs/reference/vrl/functions/#parse-functions) for common log formats. + Before reaching for the `parse_regex` function, see if a Remap [`parse_*` function](https://vector.dev/docs/reference/vrl/functions/#parse-functions) + already exists for your format. If not, please consider [opening an issue](https://github.com/timberio/vector/issues/new?labels=type%3A+new+feature) to + request support. + """, """ + All values are returned as strings; it is recommended to coerce them manually as you see fit. + """] + arguments: [{ + name: "value" + description: "The string to search."
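+ // Each returned match is an object keyed both positionally ("0" is the full
+ // match, "1" the first capture group, and so on) and by any named capture
+ // groups, as the example further below illustrates.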
+ required: true + multiple: false + type: ["string"] + }, { + name: "pattern" + description: "The regular expression pattern to search against." + required: true + multiple: false + type: ["regex"] + }] + internal_failure_reasons: ["`value` fails to parse via the provided `pattern`"] + return: { + types: ["array"] + rules: ["Matches will return all capture groups corresponding to the leftmost matches in the text.", "If no match is found an error is raised."] + } + examples: [{ + title: "Parse via Regex (all matches)" + source: "parse_regex_all(\"first group and second group.\", r'(?P<number>.*?) group')" + return: [{ + number: "first" + "0": "first group" + "1": "first" + }, { + number: "second" + "0": "second group" + "1": "second" + }] + }] + } + uuid_v4: { + anchor: "uuid_v4" + name: "uuid_v4" + category: "Random" + description: "Generates a random [UUIDv4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) string." + notices: [] + arguments: [] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Create a UUIDv4" + source: "uuid_v4()" + return: "1d262f4f-199b-458d-879f-05fd0a5f0683" + }] + } + to_float: { + anchor: "to_float" + name: "to_float" + category: "Coerce" + description: "Coerces the `value` into a float." + notices: [] + arguments: [{ + name: "value" + description: "The value to convert to a float. If a string, it must be the string representation of a float or an error is raised." + required: true + multiple: false + type: ["float", "integer", "boolean", "string"] + }] + internal_failure_reasons: ["`value` is not a supported float representation"] + return: { + types: ["float"] + rules: ["If `value` is a string, it must be the string representation of a float or else an error is raised.", "If `value` is a boolean, `0.0` will be returned for `false` and `1.0` will be returned for `true`."] + } + examples: [{ + title: "Coerce to a float" + source: "to_float(\"3.145\")" + return: 3.145 + }] + } + parse_aws_cloudwatch_log_subscription_message: { + anchor: "parse_aws_cloudwatch_log_subscription_message" + name: "parse_aws_cloudwatch_log_subscription_message" + category: "Parse" + description: """ + Parses AWS CloudWatch Logs events (configured through AWS Cloudwatch subscriptions) coming from the + `aws_kinesis_firehose` source. + """ + notices: [] + arguments: [{ + name: "value" + description: "The string representation of the message to parse."
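+ // Note that the camelCase fields of the subscription envelope ("messageType",
+ // "logGroup", "logStream", ...) come back snake_cased ("message_type",
+ // "log_group", "log_stream", ...), as the example below shows.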
+ required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`value` is not a properly formatted AWS Cloudwatch Log subscription message"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse AWS Cloudwatch Log subscription message" + input: { + log: { + message: """ + { + "messageType": "DATA_MESSAGE", + "owner": "111111111111", + "logGroup": "test", + "logStream": "test", + "subscriptionFilters": [ + \t"Destination" + ], + "logEvents": [ + \t{ + \t "id": "35683658089614582423604394983260738922885519999578275840", + \t "timestamp": 1600110569039, + \t "message": "{\\"bytes\\":26780,\\"datetime\\":\\"14/Sep/2020:11:45:41 -0400\\",\\"host\\":\\"157.130.216.193\\",\\"method\\":\\"PUT\\",\\"protocol\\":\\"HTTP/1.0\\",\\"referer\\":\\"https://www.principalcross-platform.io/markets/ubiquitous\\",\\"request\\":\\"/expedite/convergence\\",\\"source_type\\":\\"stdin\\",\\"status\\":301,\\"user-identifier\\":\\"-\\"}" + \t} + ] + } + """ + } + } + source: "parse_aws_cloudwatch_log_subscription_message(.message)" + output: { + log: { + owner: "111111111111" + message_type: "DATA_MESSAGE" + log_group: "test" + log_stream: "test" + subscription_filters: ["Destination"] + log_events: [{ + id: "35683658089614582423604394983260738922885519999578275840" + message: "{\"bytes\":26780,\"datetime\":\"14/Sep/2020:11:45:41 -0400\",\"host\":\"157.130.216.193\",\"method\":\"PUT\",\"protocol\":\"HTTP/1.0\",\"referer\":\"https://www.principalcross-platform.io/markets/ubiquitous\",\"request\":\"/expedite/convergence\",\"source_type\":\"stdin\",\"status\":301,\"user-identifier\":\"-\"}" + timestamp: "2020-09-14T19:09:29.039Z" + }] + } + } + }] + } + parse_key_value: { + anchor: "parse_key_value" + name: "parse_key_value" + category: "Parse" + description: """ + Parses the `value` in key/value format. Also known as [logfmt](https://brandur.org/logfmt). + + * Keys and values can be wrapped with `"`. + * `"` characters can be escaped by `\\`. + """ + notices: [""" + All values are returned as strings; it is recommended to coerce them manually as you see fit. + """] + arguments: [{ + name: "value" + description: "The string to parse." + required: true + multiple: false + type: ["string"] + }, { + name: "key_value_delimiter" + description: "The string that separates the key from the value." + required: false + multiple: false + default: "=" + type: ["string"] + }, { + name: "field_delimiter" + description: "The string that separates each key/value pair."
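+ // A sketch of the quoting behaviour described above: a quoted value may
+ // contain the field delimiter (input and output are illustrative):
+ //
+ //   parse_key_value(s'msg="hello world" id=1')
+ //   # { "msg": "hello world", "id": "1" }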
+ required: false + multiple: false + default: " " + type: ["string"] + }] + internal_failure_reasons: ["`value` is not a properly formatted key/value string"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse logfmt log" + source: """ + parse_key_value( + \t"@timestamp=\\"Sun Jan 10 16:47:39 EST 2021\\" level=info msg=\\"Stopping all fetchers\\" tag#production=stopping_fetchers id=ConsumerFetcherManager-1382721708341 module=kafka.consumer.ConsumerFetcherManager" + ) + """ + return: { + "@timestamp": "Sun Jan 10 16:47:39 EST 2021" + level: "info" + msg: "Stopping all fetchers" + "tag#production": "stopping_fetchers" + id: "ConsumerFetcherManager-1382721708341" + module: "kafka.consumer.ConsumerFetcherManager" + } + }, { + title: "Parse comma delimited log" + source: """ + parse_key_value( + \t"path:\\"/cart_link\\", host:store.app.com, fwd: \\"102.30.171.16\\", dyno: web.1 connect:0ms, service:87ms, status:304, bytes:632, protocol:https", + \tfield_delimiter: ",", + \tkey_value_delimiter: ":" + ) + """ + return: { + path: "/cart_link" + host: "store.app.com" + fwd: "102.30.171.16" + dyno: "web.1" + connect: "0ms" + service: "87ms" + status: "304" + bytes: "632" + protocol: "https" + } + }] + } + ends_with: { + anchor: "ends_with" + name: "ends_with" + category: "String" + description: "Determines if the `value` ends with the `substring`." + notices: [] + arguments: [{ + name: "value" + description: "The string to search." + required: true + multiple: false + type: ["string"] + }, { + name: "substring" + description: "The substring `value` must end with." + required: true + multiple: false + type: ["string"] + }, { + name: "case_sensitive" + description: "Should the match be case sensitive?" + required: false + multiple: false + type: ["boolean"] + default: true + }] + internal_failure_reasons: [] + return: { + types: ["boolean"] + } + examples: [{ + title: "String ends with (case sensitive)" + source: "ends_with(\"The Needle In The Haystack\", \"The Haystack\")" + return: true + }, { + title: "String ends with (case insensitive)" + source: "ends_with(\"The Needle In The Haystack\", \"the haystack\", case_sensitive: false)" + return: true + }] + } + sha2: { + anchor: "sha2" + name: "sha2" + category: "Hash" + description: "Calculates a [SHA-2](https://en.wikipedia.org/wiki/SHA-2) hash of the `value`." + notices: [] + arguments: [{ + name: "value" + description: "The string to calculate the hash for." + required: true + multiple: false + type: ["string"] + }, { + name: "variant" + description: "The variant of the algorithm to use." + enum: { + "SHA-224": "SHA-224 algorithm" + "SHA-256": "SHA-256 algorithm" + "SHA-384": "SHA-384 algorithm" + "SHA-512": "SHA-512 algorithm" + "SHA-512/224": "SHA-512/224 algorithm" + "SHA-512/256": "SHA-512/256 algorithm" + } + required: false + multiple: false + default: "SHA-512/256" + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Calculate sha2 hash" + source: "sha2(\"foo\", variant: \"SHA-512/224\")" + return: "d68f258d37d670cfc1ec1001a0394784233f88f056994f9a7e5e99be" + }] + } + to_syslog_facility: { + anchor: "to_syslog_facility" + name: "to_syslog_facility" + category: "Coerce" + description: """ + Coerces the `value`, a Syslog [facility code](https://en.wikipedia.org/wiki/Syslog#Facility), into its corresponding + Syslog keyword, e.g. 0 into `"kern"`, 1 into `"user"`, etc. + """ + notices: [] + arguments: [{ + name: "value" + description: "The facility code."
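+ // A sketch of the code-to-keyword mapping (per the standard Syslog facility
+ // table):
+ //
+ //   to_syslog_facility(0) # "kern"
+ //   to_syslog_facility(1) # "user"
+ //   to_syslog_facility(4) # "auth"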
+ required: true + multiple: false + type: ["integer"] + }] + internal_failure_reasons: ["`value` is not a valid Syslog [facility code](https://en.wikipedia.org/wiki/Syslog#Facility)."] + return: { + types: ["string"] + } + examples: [{ + title: "Coerce to a Syslog facility" + source: "to_syslog_facility(4)" + return: "auth" + }] + } + downcase: { + anchor: "downcase" + name: "downcase" + category: "String" + description: """ + Downcases the `value`. + + "Downcase" is defined according to the terms of the Unicode Derived Core Property Lowercase. + """ + notices: [] + arguments: [{ + name: "value" + description: "The string to convert to lowercase." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Downcase a string" + source: "downcase(\"Hello, World!\")" + return: "hello, world!" + }] + } + ipv6_to_ipv4: { + anchor: "ipv6_to_ipv4" + name: "ipv6_to_ipv4" + category: "IP" + description: """ + Converts the `ip` to an IPv4 address. + + If the parameter is already an IPv4 address it is passed through untouched. If it is an IPv6 address it has + to be an IPv4 compatible address. + """ + notices: [] + arguments: [{ + name: "ip" + description: "The IPv4 mapped IPv6 address to convert." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`ip` is not a valid IP address", "`ip` is an IPv6 address that is not compatible with IPv4"] + return: { + types: ["string"] + rules: ["If the parameter is already an IPv4 address it is passed through untouched. If it is an IPv6 address it has to be an IPv4 compatible address."] + } + examples: [{ + title: "IPv6 to IPv4" + source: "ipv6_to_ipv4(\"::ffff:192.168.0.1\")" + return: "192.168.0.1" + }] + } + parse_grok: { + anchor: "parse_grok" + name: "parse_grok" + category: "Parse" + description: """ + Parses the `value` using the [`grok` format](https://grokdebug.herokuapp.com/). + + All patterns [listed here](https://github.com/daschl/grok/tree/master/patterns) are supported. + """ + notices: [""" + It is recommended to use maintained Grok patterns when possible, since they will be improved over time + by the community. + """] + arguments: [{ + name: "value" + description: "The string to parse." + required: true + multiple: false + type: ["string"] + }, { + name: "pattern" + description: "The [Grok pattern](https://github.com/daschl/grok/tree/master/patterns)." + required: true + multiple: false + type: ["string"] + }, { + name: "remove_empty" + description: "If set to true, any patterns that resolve to an empty value will be removed from the result." + required: false + multiple: false + default: true + type: ["boolean"] + }] + internal_failure_reasons: ["`value` fails to parse via the provided `pattern`"] + return: { + types: ["object"] + } + examples: [{ + title: "Parse via Grok" + source: """ + parse_grok( + \t"2020-10-02T23:22:12.223222Z info Hello world", + \t"%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}" + ) + """ + return: { + timestamp: "2020-10-02T23:22:12.223222Z" + level: "info" + message: "Hello world" + } + }] + } + ceil: { + anchor: "ceil" + name: "ceil" + category: "Number" + description: "Rounds the `value` up to the specified `precision`." + notices: [] + arguments: [{ + name: "value" + description: "The number to round up." + required: true + multiple: false + type: ["integer", "float"] + }, { + name: "precision" + description: "The number of decimal places to round to."
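+ // `precision` counts decimal places; a sketch of rounding up at different
+ // precisions (values are illustrative):
+ //
+ //   ceil(4.345)               # 5
+ //   ceil(4.345, precision: 1) # 4.4
+ //   ceil(4.345, precision: 2) # 4.35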
+ required: false + multiple: false + default: 0 + type: ["integer"] + }] + internal_failure_reasons: [] + return: { + types: ["integer", "float"] + rules: ["If `precision` is `0`, then an integer is returned, otherwise a float is returned."] + } + examples: [{ + title: "Round a number up (without precision)" + source: "ceil(4.345)" + return: 5 + }, { + title: "Round a number up (with precision)" + source: "ceil(4.345, precision: 2)" + return: 4.35 + }] + } + ip_cidr_contains: { + anchor: "ip_cidr_contains" + name: "ip_cidr_contains" + category: "IP" + description: "Determines if the `ip` is contained within the block referenced by the `cidr`." + notices: [] + arguments: [{ + name: "cidr" + description: "The CIDR mask - either v4 or v6." + required: true + multiple: false + type: ["string"] + }, { + name: "ip" + description: "The IP address - either a v4 or a v6 address." + required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: ["`cidr` is not a valid CIDR", "`ip` is not a valid IP address"] + return: { + types: ["boolean"] + } + examples: [{ + title: "IPv4 contains CIDR" + source: "ip_cidr_contains(\"192.168.0.0/16\", \"192.168.10.32\")" + return: true + }, { + title: "IPv6 contains CIDR" + source: "ip_cidr_contains(\"2001:4f8:4:ba::/64\", \"2001:4f8:4:ba:2e0:81ff:fe22:d1f1\")" + return: true + }] + } + length: { + anchor: "length" + name: "length" + category: "Enumerate" + description: "Returns the length of the `value`." + notices: [] + arguments: [{ + name: "value" + description: "The array, object, or string to measure." + required: true + multiple: false + type: ["array", "object", "string"] + }] + internal_failure_reasons: [] + return: { + types: ["integer"] + rules: ["If `value` is an array, the size of the array is returned.", "If `value` is a string, the size of the string is returned.", "If `value` is an object, the number of top-level keys is returned (nested keys are ignored)."] + } + examples: [{ + title: "Length (object)" + source: """ + length({ + \t"portland": "Trail Blazers", + \t"seattle": "Supersonics" + }) + """ + return: 2 + }, { + title: "Length (nested object)" + source: """ + length({ + \t"home": { + \t\t"city": "Portland", + \t\t"state": "Oregon" + \t}, + \t"name": "Trail Blazers", + \t"mascot": { + \t\t"name": "Blaze the Trail Cat" + \t} + }) + """ + return: 3 + }, { + title: "Length (array)" + source: "length([\"Trail Blazers\", \"Supersonics\", \"Grizzlies\"])" + return: 3 + }, { + title: "Length (string)" + source: "length(\"The Planet of the Apes Musical\")" + return: 30 + }] + } + to_timestamp: { + anchor: "to_timestamp" + name: "to_timestamp" + category: "Coerce" + description: "Coerces the `value` into a timestamp." + notices: [] + arguments: [{ + name: "value" + description: "The value to convert to a timestamp. If a string, it must be a valid timestamp representation or an error is raised."
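+ // A sketch of the integer case (Unix seconds, assuming a UTC result),
+ // inverting the earlier `to_int` example:
+ //
+ //   to_timestamp(1609366853) # "2020-12-30T22:20:53Z"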
+ required: true + multiple: false + type: ["string", "integer", "timestamp"] + }] + internal_failure_reasons: ["When `value` is a `string`, it is not a valid timestamp format", "When `value` is an `int`, it is not within the Unix timestamp range"] + return: { + types: ["timestamp"] + rules: ["If `value` is a `string`, it is parsed against the supported timestamp formats.", "If `value` is an `integer`, it is assumed to be a Unix representation of the timestamp (the number of seconds after January 1st, 1970)."] + } + examples: [{ + title: "Coerce to a timestamp" + source: "to_timestamp(\"2020-10-21T16:00:00Z\")" + return: "2020-10-21T16:00:00Z" + }] + } + object: { + anchor: "object" + name: "object" + category: "Type" + description: """ + Errors if `value` is not an object; if `value` is an object, it is returned. + + This allows the type checker to guarantee that the returned value is an object and can be used in any function + that expects this type. + """ + notices: [] + arguments: [{ + name: "value" + description: "The value to ensure is an object." + required: true + multiple: false + type: ["any"] + }] + internal_failure_reasons: ["`value` is not an object."] + return: { + types: ["object"] + rules: [#"If `value` is an object then it is returned."#, #"Otherwise an error is raised."#] + } + examples: [{ + title: "Declare an object type" + input: { + log: { + value: { + field1: "value1" + field2: "value2" + } + } + } + source: "object(.value)" + return: { + field1: "value1" + field2: "value2" + } + }] + } + to_syslog_level: { + anchor: "to_syslog_level" + name: "to_syslog_level" + category: "Coerce" + description: """ + Coerces the `value`, a Syslog [severity level](https://en.wikipedia.org/wiki/Syslog#Severity_level), into its corresponding keyword, + e.g. 0 into `"emerg"`, 1 into `"alert"`, etc. + """ + notices: [] + arguments: [{ + name: "value" + description: "The severity level." + required: true + multiple: false + type: ["integer"] + }] + internal_failure_reasons: ["`value` is not a valid Syslog [severity level](https://en.wikipedia.org/wiki/Syslog#Severity_level)."] + return: { + types: ["string"] + } + examples: [{ + title: "Coerce to a Syslog level" + source: "to_syslog_level(5)" + return: "notice" + }] + } + join: { + anchor: "join" + name: "join" + category: "String" + description: """ + Joins each string in the `value` array into a single string, with items optionally separated from one another + by a `separator`. + """ + notices: [] + arguments: [{ + name: "value" + description: "The array of strings to join together." + required: true + multiple: false + type: ["array"] + }, { + name: "separator" + description: "The string separating each original element when joined." + required: false + multiple: false + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Join array (no separator)" + source: "join([\"bring\", \"us\", \"together\"])" + return: "bringustogether" + }, { + title: "Join array (comma separator)" + source: "join([\"sources\", \"transforms\", \"sinks\"], separator: \", \")" + return: "sources, transforms, sinks" + }] + } + strip_ansi_escape_codes: { + anchor: "strip_ansi_escape_codes" + name: "strip_ansi_escape_codes" + category: "String" + description: "Strips [ANSI escape codes](https://en.wikipedia.org/wiki/ANSI_escape_code) from the `value`." + notices: [] + arguments: [{ + name: "value" + description: "The string to strip."
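+ // Strings containing no ANSI escape codes pass through unchanged (a sketch):
+ //
+ //   strip_ansi_escape_codes("plain text") # "plain text"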
+ required: true + multiple: false + type: ["string"] + }] + internal_failure_reasons: [] + return: { + types: ["string"] + } + examples: [{ + title: "Strip ANSI escape codes" + source: "strip_ansi_escape_codes(\"\\e[46mfoo\\e[0m bar\")" + return: "foo bar" + }] + } + split: { + anchor: "split" + name: "split" + category: "String" + description: "Splits the `value` via the `pattern`." + notices: [] + arguments: [{ + name: "value" + description: "The string to split." + required: true + multiple: false + type: ["string"] + }, { + name: "pattern" + description: "The string is split whenever this pattern is matched." + required: true + multiple: false + type: ["string", "regex"] + }, { + name: "limit" + description: "The maximum number of substrings to return." + required: false + multiple: false + type: ["integer"] + }] + internal_failure_reasons: [] + return: { + types: ["array"] + rules: ["If `limit` is specified, after `limit` has been reached, the remainder of the string is returned unsplit."] + } + examples: [{ + title: "Split a string (no limit)" + source: "split(\"apples and pears and bananas\", \" and \")" + return: ["apples", "pears", "bananas"] + }, { + title: "Split a string (with a limit)" + source: "split(\"apples and pears and bananas\", \" and \", limit: 1)" + return: ["apples", "pears and bananas"] + }] + } + } + literals: { + object: { + anchor: "object" + name: "object" + title: "Object" + description: """ + An _object_ literal is a growable key/value structure that is syntactically equivalent to a JSON object. + + A well-formed JSON document is a valid VRL object. + """ + characteristics: { + ordering: { + anchor: "ordering" + name: "ordering" + title: "Ordering" + description: """ + Object fields are ordered alphabetically by the key in ascending order. Therefore, operations like + encoding into JSON produce a string with keys that are in ascending alphabetical order. + """ + } + } + examples: [""" + { + "field1": "value1", + "field2": [ "value2", "value3", "value4" ], + "field3": { "field4": "value5" } + } + """, """ + { + "field1": .some_path, + "field2": some_variable, + "field3": { "subfield": "some value" } + } + """] + } + float: { + anchor: "float" + name: "float" + title: "Float" + description: """ + A _float_ literal is a decimal representation of a 64-bit floating-point type (specifically, the "binary64" type + defined in IEEE 754-2008). + + A decimal floating-point literal consists of an integer part (decimal digits), a decimal point, and a fractional + part (decimal digits). + """ + characteristics: { + limits: { + anchor: "limits" + name: "limits" + title: "Limits" + description: """ + Floats in VRL can range from `-1.7976931348623157E+308f64` to `1.7976931348623157E+308f64`. Floats outside that + range are wrapped. + """ + } + underscores: { + anchor: "underscores" + name: "underscores" + title: "Underscores" + description: """ + Floats can use underscore (`_`) characters instead of `,` to make them human readable. For + example, `1_000_000`. + """ + } + } + examples: ["1_000_000.01", "1000000.01", "1.001"] + } + string: { + anchor: "string" + name: "string" + title: "String" + description: """ + A _string_ literal is a [UTF-8–encoded](https://en.wikipedia.org/wiki/UTF-8) string. String literals can be raw or interpreted.
+ + **Raw string** literals are composed of the uninterpreted (implicitly UTF-8-encoded) characters + identified with the `s` sigil and wrapped with single quotes (`s'...'`); in particular, backslashes have + no special meaning and the string may contain newlines. + + **Interpreted string** literals are character sequences between double quotes (`"..."`). Within the quotes, + any character may appear except newline and unescaped double quote. The text between the quotes forms the result + of the literal, with backslash escapes interpreted as defined below. + """ + examples: [#""" + "Hello, world! 🌎" + """#, #""" + "Hello, world! \u{1F30E}" + """#, #""" + s'Hello, world!' + """#, #""" + s'{ "foo": "bar" }' + """#] + characteristics: { + backslash_escapes: { + anchor: "backslash_escapes" + title: "Backslash escapes" + description: "Special characters, such as newlines, can be expressed with a backslash escape." + enum: { + "`\\u{7FFF}`": "24-bit Unicode character code (up to 6 digits)" + "`\\n`": "Newline" + "`\\r`": "Carriage return" + "`\\t`": "Tab" + "`\\\\`": "Backslash" + "`\\0`": "Null" + "`\\\"`": "Double quote" + "`\\'`": "Single quote" + } + name: "backslash_escapes" + } + concatenation: { + anchor: "concatenation" + name: "concatenation" + title: "Concatenation" + description: "Strings can be concatenated with the `+` operator, e.g. `\"foo\" + \"bar\"` results in `\"foobar\"`." + } + invalid_characters: { + anchor: "invalid_characters" + name: "invalid_characters" + title: "Invalid Characters" + description: "Invalid UTF-8 sequences are replaced with the `�` character." + } + } + } + integer: { + anchor: "integer" + name: "integer" + title: "Integer" + description: "An _integer_ literal is a sequence of digits representing a 64-bit signed integer type." + characteristics: { + limits: { + anchor: "limits" + name: "limits" + title: "Limits" + description: """ + Integers in VRL can range from `-9223372036854775808` to `9223372036854775807`. Integers outside that range are + wrapped. + """ + } + underscore: { + anchor: "underscore" + name: "underscore" + title: "Underscore" + description: """ + Integers can use underscore (`_`) characters instead of `,` to make them human readable. For + example, `1_000_000`. + """ + } + } + examples: ["1_000_000", "1000000"] + } + array: { + anchor: "array" + name: "array" + title: "Array" + description: "An _array_ literal is a comma-delimited set of expressions that represents a contiguous growable array type." + characteristics: {} + examples: [#"[]"#, #"["first", "second", "third"]"#, #"["mixed", 1, 1.0, true, false, {"foo": "bar"}]"#, #"["first-level", ["second-level", ["third-level"]]]"#, #"[.field1, .field2, to_int!("2"), variable_1]"#, #""" + [ + "expressions", + 1 + 2, + 2 == 5, + true || false + ] + """#] + } + null: { + anchor: "null" + name: "null" + title: "Null" + description: "A _null_ literal is the absence of a defined value." + characteristics: {} + examples: ["null"] + } + regular_expression: { + anchor: "regular_expression" + name: "regular_expression" + title: "Regular Expression" + description: """ + A _regular expression_ literal represents a [Regular Expression](https://en.wikipedia.org/wiki/Regular_expression) used for string matching and + parsing. + + Regular expressions are defined by the `r` sigil and wrapped with single quotes (`r'...'`). The value between + the quotes uses the [Rust regex syntax](https://docs.rs/regex/latest/regex/#syntax).
+ """ + examples: [#"r'^Hello, World!$'"#, #"r'^Hello, World!$'i"#, #"r'^\d{4}-\d{2}-\d{2}$'"#, #"r'(?P\d{4})-(?P\d{2})-(?P\d{2})'"#] + characteristics: { + flags: { + anchor: "flags" + title: "Flags" + description: """ + Regular expressions allow for flags. Flags can be combined, as in `r'(?ixm)pattern'`, + `r'(?im)pattern'`, etc. + + To learn more about regular expressions in Rust—and by extension in VRL—we strongly\trecommend the + in-browser [Rustexp expression editor and tester](https://rustexp.lpil.uk/). + """ + enum: { + x: "Ignore whitespace" + i: "Case insensitive" + m: "Multi-line mode" + } + name: "flags" + } + named_captures: { + anchor: "named_captures" + name: "named_captures" + title: "Named Captures" + description: """ + Regular expressions support named capture groups, allowing extractions to be associated with keys. + Named captures should be preceded with a `?P` declaraction. This regex, for example... + + ```vrl + r'(?P\\d{4})-(?P\\d{2})-(?P\\d{2})' + ``` + + ...extracts captures with the `y`, `m`, and `d` keys. + """ + } + } + } + timestamp: { + anchor: "timestamp" + name: "timestamp" + title: "Timestamp" + description: """ + A _timestamp_ literal defines a native timestamp expressed in the [RFC 3339 format](https://tools.ietf.org/html/rfc3339) with a + nanosecond precision. + + Timestamp literals are defined by the `t` sigil and wrapped with single quotes (`t'2021-02-11T10:32:50.553955473Z'`). + """ + examples: [#""" + t'2021-02-11T10:32:50.553955473Z' + """#, #""" + t'2021-02-11T10:32:50.553Z' + """#, #""" + t'2021-02-11T10:32:50.553-04:00' + """#] + characteristics: { + timezones: { + anchor: "timezones" + name: "timezones" + title: "Timezones" + description: "As defined in [RFC 3339 format](https://tools.ietf.org/html/rfc3339), timestamp literals support UTC and local offsets." + } + } + } + boolean: { + anchor: "boolean" + name: "boolean" + title: "Boolean" + description: "A _Boolean_ literal represents a binary value which can only be either `true` or `false`." + characteristics: {} + examples: ["true", "false"] + } + } + principles: { + performance: { + anchor: "performance" + name: "performance" + title: "Performance" + description: """ + VRL is implemented in the very fast and efficient [Rust](https://www.rust-lang.org/) language and + VRL scripts are compiled into Rust code when Vector is started. This means that you can use VRL to + transform observability data with a minimal per-event performance penalty vis-à-vis pure Rust. In addition, + ergonomic features such as compile-time correctness checks and the lack of language constructs like + loops make it difficult to write scripts that are slow or buggy or require optimization. + """ + } + safety: { + anchor: "safety" + name: "safety" + title: "Safety" + description: """ + VRL is a safe language in several senses: VRL scripts have access only to the event data + that they handle and not, for example, to the Internet or the host; VRL provides the same strong memory + safety guarantees as Rust; and, as mentioned above, compile-time correctness checks prevent VRL + scripts from behaving in unexpected or sub-optimal ways. These factors distinguish VRL from other + available event data transformation languages and runtimes. + """ + } + } + syntax: { + whitespace: { + anchor: "whitespace" + name: "whitespace" + title: "Whitespace" + description: """ + Whitespace is any non-empty string as defined by the [Unicode `White_Space` property](https://en.wikipedia.org/wiki/Unicode_character_property#Whitespace). 
+ + VRL is a "free-form" language, meaning that all forms of whitespace serve only to separate tokens in the + grammar, and have no semantic significance. + """ + } + keywords: { + anchor: "keywords" + name: "keywords" + title: "Keywords" + description: """ + Keywords are reserved words that are used for primitive language features, such as `if`, and cannot be used as + variable assignments or other custom directives. The following words are reserved: + + * `abort` + * `as` + * `break` + * `continue` + * `else` + * `false` + * `for` + * `if` + * `impl` + * `in` + * `let` + * `loop` + * `null` + * `return` + * `self` + * `std` + * `then` + * `this` + * `true` + * `type` + * `until` + * `use` + * `while` + """ + } + comment: { + anchor: "comment" + name: "comment" + title: "Comment" + description: """ + A _comment_ serves as program documentation and is identified with `#`. Each line must be preceded with a + `#` character. VRL currently does not allow for block comments. + """ + examples: ["# comment", """ + # multi-line + # comment + """] + } + } +} +guides: { + integrate: { + sources: { + internal_logs: { + config: { + sources: { + in: { + type: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: {
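+ // Each stub below expands to a minimal Vector topology; rendered as TOML, this
+ // internal_logs-to-humio_logs pair would look roughly like (a sketch):
+ //
+ //   [sources.in]
+ //   type = "internal_logs"
+ //
+ //   [sinks.out]
+ //   type = "humio_logs"
+ //   inputs = ["in"]
+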
out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "internal_logs" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + stdin: { + config: { + sources: { + in: { + host_key: null + max_length: null + type: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + 
sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + 
sources: { + in: { + type: "stdin" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + file: { + config: { + sources: { + in: { + exclude: null + file_key: null + fingerprint: null + glob_minimum_cooldown: null + host_key: null + ignore_not_found: null + ignore_older: null + include: null + line_delimiter: null + max_line_bytes: null + max_read_bytes: null + oldest_first: null + remove_after: null + read_from: null + ignore_checkpoints: null + type: null + multiline: null + data_dir: null + encoding: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + 
out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "file" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + prometheus_scrape: { + config: { + sources: { + in: { + endpoints: null + scrape_interval_secs: null + auth: null + type: null + tls: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + generator: { + config: { + sources: { + in: { + format: null + interval: null + count: null + lines: null + sequence: null + type: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + 
type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + 
pulsar: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "generator" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + journald: { + config: { + sources: { + in: { + batch_size: null + current_boot_only: null + exclude_units: null + include_units: null + journalctl_path: null + type: null + data_dir: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "journald" + } + 
} + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "journald" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + internal_metrics: { + config: { + sources: { + in: { + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + address: null + access_key: null + type: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + 
} + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + 
sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "aws_kinesis_firehose" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + heroku_logs: { + config: { + sources: { + in: { + address: null + auth: null + query_parameters: null + type: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: 
{ + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "heroku_logs" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + kafka: { + config: { + sources: { + in: { + auto_offset_reset: null + bootstrap_servers: null + commit_interval_ms: null + fetch_wait_max_ms: null + group_id: null + key_field: null + topic_key: null + partition_key: null + offset_key: null + librdkafka_options: null + sasl: null + session_timeout_ms: null + socket_timeout_ms: null + topics: null + type: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "kafka" 
+ } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "kafka" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + http: { + config: { + sources: { + in: { + address: null + 
encoding: null + headers: null + auth: null + query_parameters: null + type: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + 
out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "http" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + nginx_metrics: { + config: { + sources: { + in: { + endpoints: null + scrape_interval_secs: null + namespace: null + tls: null + auth: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + docker_logs: { + config: { + sources: { + in: { + docker_host: null + tls: null + auto_partial_merge: null + exclude_containers: null + include_containers: null + include_labels: null + include_images: null + retry_backoff_secs: null + host_key: null + type: null + multiline: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "docker_logs" + } + } 
+ sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "docker_logs" + } + } + sinks: { + out: { + type: "clickhouse" + 
inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + splunk_hec: { + config: { + sources: { + in: { + address: null + token: null + type: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { 
+ in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "splunk_hec" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + apache_metrics: { + config: { + sources: { + in: { + endpoints: null + scrape_interval_secs: null + namespace: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + syslog: { + config: { + sources: { + in: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } 
+ prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "syslog" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + 
} + } + } + } + prometheus_exporter: {} + } + } + mongodb_metrics: { + config: { + sources: { + in: { + endpoints: null + scrape_interval_secs: null + namespace: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + statsd: { + config: { + sources: { + in: { + address: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + vector: { + config: { + sources: { + in: { + address: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + 
in: { + type: "vector" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "vector" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + postgresql_metrics: { + config: { + sources: { + in: { + endpoints: null + scrape_interval_secs: null + namespace: null + include_databases: null + exclude_databases: null + tls: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + 
aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + kubernetes_logs: { + config: { + sources: { + in: { + annotation_fields: null + auto_partial_merge: null + self_node_name: null + exclude_paths_glob_patterns: null + extra_field_selector: null + extra_label_selector: null + timezone: null + type: null + data_dir: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { 
+ in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "kubernetes_logs" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + host_metrics: { + config: { + sources: { + in: { + collectors: null + namespace: null + scrape_interval_secs: null + disk: null + filesystem: null + network: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + aws_s3: { + config: { + sources: { + in: { + auth: null + endpoint: null + region: null + strategy: null + compression: null + sqs: null + type: null + multiline: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + 
config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { 
+ config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "aws_s3" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + socket: { + config: { + sources: { + in: { + address: null + host_key: null + max_length: null + mode: null + path: null + shutdown_timeout_secs: null + type: null + keepalive: null + tls: null + } + } + } + sinks: { + vector: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "vector" + inputs: ["in"] + } + } + } + } + azure_monitor_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "azure_monitor_logs" + inputs: ["in"] + } + } + } + } + gcp_pubsub: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "gcp_pubsub" + inputs: ["in"] + } + } + } + } + console: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "console" + inputs: ["in"] + } + } + } + } + influxdb_metrics: {} + aws_sqs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "aws_sqs" + inputs: ["in"] + } + } + } + } + socket: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "socket" + inputs: ["in"] + } + } + } + } + influxdb_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "influxdb_logs" + inputs: ["in"] + } + } + } + } + prometheus_remote_write: {} + aws_s3: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "aws_s3" + inputs: ["in"] + } + } + } + } + datadog_metrics: {} + nats: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "nats" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "aws_cloudwatch_logs" + inputs: ["in"] + } + } + } + } + elasticsearch: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "elasticsearch" + inputs: ["in"] + } + } + } + } + aws_kinesis_streams: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "aws_kinesis_streams" + inputs: ["in"] + } + } + } + } + splunk_hec: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "splunk_hec" + inputs: ["in"] + } + } + } + } + sematext_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "sematext_logs" + inputs: ["in"] + } + } + } + } + humio_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "humio_logs" + inputs: ["in"] + } + } + } + } + papertrail: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "papertrail" + inputs: ["in"] + } + } + } + } + loki: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + 
type: "loki" + inputs: ["in"] + } + } + } + } + humio_metrics: {} + http: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "http" + inputs: ["in"] + } + } + } + } + datadog_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "datadog_logs" + inputs: ["in"] + } + } + } + } + kafka: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "kafka" + inputs: ["in"] + } + } + } + } + aws_kinesis_firehose: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "aws_kinesis_firehose" + inputs: ["in"] + } + } + } + } + sematext_metrics: {} + honeycomb: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "honeycomb" + inputs: ["in"] + } + } + } + } + gcp_stackdriver_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "gcp_stackdriver_logs" + inputs: ["in"] + } + } + } + } + logdna: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "logdna" + inputs: ["in"] + } + } + } + } + blackhole: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "blackhole" + inputs: ["in"] + } + } + } + } + aws_cloudwatch_metrics: {} + file: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "file" + inputs: ["in"] + } + } + } + } + new_relic_logs: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "new_relic_logs" + inputs: ["in"] + } + } + } + } + gcp_cloud_storage: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "gcp_cloud_storage" + inputs: ["in"] + } + } + } + } + statsd: {} + pulsar: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "pulsar" + inputs: ["in"] + } + } + } + } + clickhouse: { + config: { + sources: { + in: { + type: "socket" + } + } + sinks: { + out: { + type: "clickhouse" + inputs: ["in"] + } + } + } + } + prometheus_exporter: {} + } + } + prometheus_remote_write: { + config: { + sources: { + in: { + address: null + auth: null + type: null + tls: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: {} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + aws_ecs_metrics: { + config: { + sources: { + in: { + endpoint: null + namespace: null + scrape_interval_secs: null + version: null + type: null + } + } + } + sinks: { + vector: {} + azure_monitor_logs: {} + gcp_pubsub: {} + console: {} + influxdb_metrics: {} + aws_sqs: {} + socket: {} + influxdb_logs: {} + prometheus_remote_write: {} + aws_s3: {} + datadog_metrics: {} + nats: {} + aws_cloudwatch_logs: {} + elasticsearch: {} + aws_kinesis_streams: {} + splunk_hec: {} + sematext_logs: {} + humio_logs: {} + papertrail: {} + loki: {} + humio_metrics: {} + http: {} + datadog_logs: {} + kafka: 
{} + aws_kinesis_firehose: {} + sematext_metrics: {} + honeycomb: {} + gcp_stackdriver_logs: {} + logdna: {} + blackhole: {} + aws_cloudwatch_metrics: {} + file: {} + new_relic_logs: {} + gcp_cloud_storage: {} + statsd: {} + pulsar: {} + clickhouse: {} + prometheus_exporter: {} + } + } + } + sinks: { + vector: "hi" + azure_monitor_logs: "hi" + gcp_pubsub: "hi" + console: "hi" + influxdb_metrics: "hi" + aws_sqs: "hi" + socket: "hi" + influxdb_logs: "hi" + prometheus_remote_write: "hi" + aws_s3: "hi" + datadog_metrics: "hi" + nats: "hi" + aws_cloudwatch_logs: "hi" + elasticsearch: "hi" + aws_kinesis_streams: "hi" + splunk_hec: "hi" + sematext_logs: "hi" + humio_logs: "hi" + papertrail: "hi" + loki: "hi" + humio_metrics: "hi" + http: "hi" + datadog_logs: "hi" + kafka: "hi" + aws_kinesis_firehose: "hi" + sematext_metrics: "hi" + honeycomb: "hi" + gcp_stackdriver_logs: "hi" + logdna: "hi" + blackhole: "hi" + aws_cloudwatch_metrics: "hi" + file: "hi" + new_relic_logs: "hi" + gcp_cloud_storage: "hi" + statsd: "hi" + pulsar: "hi" + clickhouse: "hi" + prometheus_exporter: "hi" + } + } +} +team: [{ + id: "ben" + name: "Ben" + twitter: "https://twitter.com/binarylogic" + keybase: "https://keybase.io/binarylogic" + avatar: "https://github.com/binarylogic.png" + bio: """ + Ben is the CTO/Co-Founder at Timber.io and a member of the + Vector team. + """ + github: "https://github.com/binarylogic" +}, { + id: "bruce" + name: "Bruce" + avatar: "https://github.com/bruceg.png" + bio: "Bruce is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/bruceg" +}, { + id: "james" + name: "James" + twitter: "https://twitter.com/kartar" + keybase: "https://keybase.io/jamtur01" + avatar: "https://github.com/jamtur01.png" + bio: "James is the VP of Engineering at Timber.io and a member of the\tVector team." + github: "https://github.com/jamtur01" +}, { + id: "jean" + name: "Jean" + twitter: "https://twitter.com/JeanMertz" + keybase: "https://keybase.io/JeanMertz" + avatar: "https://github.com/JeanMertz.png" + bio: "Jean is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/JeanMertz" +}, { + id: "jesse" + name: "Jesse" + twitter: "https://twitter.com/jszwedko" + keybase: "https://keybase.io/jszwedko" + avatar: "https://github.com/jszwedko.png" + bio: "Jesse is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/jszwedko" +}, { + id: "kirill" + name: "Kirill" + avatar: "https://github.com/fanatid.png" + keybase: "https://keybase.io/fanatid" + bio: "Kirill is an engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/fanatid" +}, { + id: "kruno" + name: "Kruno" + avatar: "https://github.com/ktff.png" + bio: "Kruno is an engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/ktff" +}, { + id: "lee" + name: "Lee" + twitter: "https://twitter.com/leebenson" + keybase: "https://keybase.io/leebenson" + avatar: "https://github.com/leebenson.png" + bio: "Lee is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/leebenson" +}, { + id: "luc" + name: "Luc" + twitter: "https://twitter.com/lucperkins" + keybase: "https://keybase.io/lucperkins" + avatar: "https://github.com/lucperkins.png" + bio: "Luc is an engineer at Timber.io and a member of the\tVector team." 
+ github: "https://github.com/lucperkins" +}, { + id: "luke" + name: "Luke" + twitter: "https://twitter.com/lukesteensen" + keybase: "https://keybase.io/lukesteensen" + avatar: "https://github.com/lukesteensen.png" + bio: """ + Luke is a Senior Engineer at Timber.io and a member of the Vector team. + Before Timber, Luke was an engineer at Braintree working on parts of their + observability infrastructure. + """ + github: "https://github.com/lukesteensen" +}, { + id: "mike" + name: "Mike" + twitter: "https://twitter.com/MOZGIII" + keybase: "https://keybase.io/MOZGIII" + avatar: "https://github.com/MOZGIII.png" + bio: "Mike is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/MOZGIII" +}, { + id: "steve" + name: "Steve" + twitter: "https://twitter.com/sghall" + keybase: "https://keybase.io/sghall" + avatar: "https://github.com/sghall.png" + bio: "Steve is a senior engineer at Timber.io and a member of the\tVector team." + github: "https://github.com/sghall" +}, { + id: "vic" + name: "Vic" + avatar: "https://github.com/vector-vic.png" + bio: "Vic is the Vector mascot." + github: "https://github.com/vector-vic" +}, { + id: "zach" + name: "Zach" + twitter: "https://twitter.com/zsherman" + keybase: "https://keybase.io/zsherman" + avatar: "https://github.com/zsherman.png" + bio: "Zach is the CEO/co-founder of Timber.io." + github: "https://github.com/zsherman" +}] +api: { + description: """ + The Vector [GraphQL](https://graphql.org) API allows you to interact with a + running Vector instance, enabling introspection and management of + Vector in real-time. + """ + playground_url: "https://playground.vector.dev:8686/playground" + schema_json_url: "https://github.com/timberio/vector/blob/master/lib/vector-api-client/graphql/schema.json" + configuration: { + enabled: { + common: true + type: { + bool: { + default: false + } + } + required: false + description: "Whether the GraphQL API is enabled for this Vector instance." + name: "enabled" + warnings: [] + } + address: { + common: true + required: false + type: { + string: { + default: "127.0.0.1:8686" + examples: ["0.0.0.0:8686", "localhost:1234"] + syntax: "literal" + } + } + description: """ + The network address to which the API should bind. If you're running + Vector in a Docker container, make sure to bind to `0.0.0.0`. Otherwise + the API will not be exposed outside the container. + """ + name: "address" + warnings: [] + } + playground: { + common: false + required: false + type: { + bool: { + default: true + } + } + description: """ + Whether the [GraphQL Playground](https://github.com/graphql/graphql-playground) is enabled + for the API. The Playground is accessible via the `/playground` endpoint + of the address set using the `bind` parameter. + """ + name: "playground" + warnings: [] + } + } + endpoints: { + "/graphql": { + POST: { + description: """ + Main endpoint for receiving and processing + GraphQL queries. + """ + responses: { + "200": { + description: """ + The query has been processed. GraphQL returns 200 + regardless if the query was successful or not. This + is due to the fact that queries can partially fail. + Please check for the `errors` key to determine if + there were any errors in your query. + """ + } + } + } + } + "/health": { + GET: { + description: """ + Healthcheck endpoint. Useful to verify that + Vector is up and running. + """ + responses: { + "200": { + description: "Vector is initialized and running." 
+ } + } + } + } + "/playground": { + GET: { + description: """ + A bundled GraphQL playground that allows you + to explore the available queries and manually + run queries. + + We offer a [public playground](https://playground.vector.dev:8686/playground) + that you can explore without hosting your own + Vector instance. + """ + responses: { + "200": { + description: "Vector is initialized and running." + } + } + } + } + } +} +cli: { + #Args: {} + #ArgType: "string" | "list" + #Commands: {} + #Flags: {} + #Options: {} + #OptionType: "string" | "integer" | "enum" + name: "vector" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + quiet: { + flag: "--quiet" + short: "-q" + description: """ + Reduce detail of internal logging. Repeat to reduce further. Overrides + `--verbose` + """ + name: "quiet" + } + "require-healthy": { + flag: "--require-healthy" + short: "-r" + description: "Exit on startup if any sinks fail healthchecks" + env_var: "VECTOR_REQUIRE_HEALTHY" + name: "require-healthy" + } + verbose: { + flag: "--verbose" + short: "-v" + description: "Enable more detailed logging. Repeat to reduce further. Overrides `--verbose`" + name: "verbose" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + "watch-config": { + flag: "--watch-config" + short: "-w" + description: "Watch for changes in the configuration file, and reload accordingly" + env_var: "VECTOR_WATCH_CONFIG" + name: "watch-config" + } + } + options: { + color: { + option: "--color" + description: "Control when ANSI terminal formatting is used." + default: "auto" + enum: { + always: "Enable ANSI terminal formatting always." + auto: "Detect ANSI terminal formatting and enable if supported." + never: "Disable ANSI terminal formatting." + } + name: "color" + type: "enum" + required: false + } + config: { + option: "--config" + short: "-c" + description: """ + Read configuration from one or more files. Wildcard paths are + supported. If zero files are specified the default config path + `/etc/vector/vector.toml` will be targeted. + TOML, YAML and JSON file formats are supported. + The format to interpret the file with is determined from + the file extension (.toml, .yaml, .json). + We will fallback to TOML if we are unable to detect + a supported format. + """ + type: "string" + default: "/etc/vector/vector.toml" + name: "config" + env_var: "VECTOR_CONFIG" + required: false + } + "config-toml": { + option: "--config-toml" + description: """ + Read configuration from one or more files. Wildcard paths are + supported. TOML file format is assumed. + """ + name: "config-toml" + type: "string" + env_var: "VECTOR_CONFIG_TOML" + required: true + } + "config-json": { + option: "--config-json" + description: """ + Read configuration from one or more files. Wildcard paths are + supported. JSON file format is assumed. + """ + name: "config-json" + type: "string" + env_var: "VECTOR_CONFIG_JSON" + required: true + } + "config-yaml": { + option: "--config-yaml" + description: """ + Read configuration from one or more files. Wildcard paths are + supported. YAML file format is assumed. 
+ """ + name: "config-yaml" + type: "string" + env_var: "VECTOR_CONFIG_YAML" + required: true + } + threads: { + option: "--threads" + short: "-t" + description: """ + Number of threads to use for processing (default is number of + available cores) + """ + name: "threads" + type: "integer" + env_var: "VECTOR_THREADS" + required: true + } + "log-format": { + option: "--log-format" + description: "Set the logging format [default: text]" + default: "text" + enum: { + json: "Output Vector's logs as JSON." + text: "Output Vector's logs as text." + } + name: "log-format" + type: "enum" + required: false + } + } + commands: { + generate: { + description: "Generate a Vector configuration containing a list of components" + name: "generate" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + fragment: { + flag: "--fragment" + short: "-f" + description: "Whether to skip the generation of global fields" + name: "fragment" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + } + options: { + file: { + option: "--file" + description: "Generate config as a file" + name: "file" + type: "string" + example: "/etc/vector/my-config.toml" + required: true + } + } + args: { + pipeline: { + description: "Pipeline expression, e.g. `stdin/json_parser,add_fields/console`" + name: "pipeline" + type: "string" + } + } + } + help: { + description: "Prints this message or the help of the given subcommand(s)" + name: "help" + } + list: { + description: "List available components, then exit" + name: "list" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + } + options: { + format: { + option: "--format" + description: "Format the list in an encoding schema" + default: "text" + enum: { + avro: "Output components in Apache Avro format" + json: "Output components as JSON" + text: "Output components as text" + } + name: "format" + type: "enum" + required: false + } + } + } + test: { + description: """ + Run Vector config unit tests, then exit. This command is experimental and + therefore subject to change. For guidance on how to write unit tests check + out: https://vector.dev/docs/setup/guides/unit-testing/ + """ + name: "test" + options: { + "config-toml": { + option: "--config-toml" + description: """ + Test configuration from one or more files. Wildcard paths are + supported. TOML file format is assumed. + """ + name: "config-toml" + type: "string" + env_var: "VECTOR_CONFIG_TOML" + required: true + } + "config-json": { + option: "--config-json" + description: """ + Test configuration from one or more files. Wildcard paths are + supported. JSON file format is assumed. + """ + name: "config-json" + type: "string" + env_var: "VECTOR_CONFIG_JSON" + required: true + } + "config-yaml": { + option: "--config-yaml" + description: """ + Test configuration from one or more files. Wildcard paths are + supported. YAML file format is assumed. + """ + name: "config-yaml" + type: "string" + env_var: "VECTOR_CONFIG_YAML" + required: true + } + } + args: { + paths: { + description: """ + Any number of Vector config files to test. 
If none are specified + the default config path `/etc/vector/vector.toml` will be targeted + """ + name: "paths" + type: "list" + default: "/etc/vector/vector.toml" + } + } + } + top: { + description: """ + Display topology and metrics in the console, for a local or remote Vector + instance + """ + name: "top" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + "human-metrics": { + flag: "--human-metrics" + short: "-h" + description: """ + Humanize metrics, using numeric suffixes - e.g. 1,100 = 1.10 k, + 1,000,000 = 1.00 M + """ + name: "human-metrics" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + } + options: { + "refresh-interval": { + option: "--refresh-interval" + short: "-i" + description: "How often the screen refreshes (in milliseconds)" + type: "integer" + default: 500 + name: "refresh-interval" + required: false + } + url: { + option: "--url" + short: "-u" + description: "The URL for the GraphQL endpoint of the running Vector instance" + name: "url" + type: "string" + required: true + } + } + } + validate: { + description: "Validate the target config, then exit" + name: "validate" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + "no-environment": { + flag: "--no-environment" + short: "-ne" + description: """ + Disables environment checks. That includes component + checks and health checks + """ + name: "no-environment" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + "deny-warnings": { + flag: "--deny-warnings" + short: "-d" + description: "Fail validation on warnings" + name: "deny-warnings" + } + } + options: { + "config-toml": { + option: "--config-toml" + description: """ + Any number of Vector config files to validate. + TOML file format is assumed. + """ + name: "config-toml" + type: "string" + required: true + } + "config-json": { + option: "--config-json" + description: """ + Any number of Vector config files to validate. + JSON file format is assumed. + """ + name: "config-json" + type: "string" + required: true + } + "config-yaml": { + option: "--config-yaml" + description: """ + Any number of Vector config files to validate. + YAML file format is assumed. + """ + name: "config-yaml" + type: "string" + required: true + } + } + args: { + paths: { + description: """ + Any number of Vector config files to validate. If none are specified + the default config path `/etc/vector/vector.toml` will be targeted + """ + name: "paths" + type: "list" + default: "/etc/vector/vector.toml" + } + } + } + vrl: { + description: "Vector Remap Language CLI" + name: "vrl" + flags: { + help: { + flag: "--help" + short: "-h" + description: "Prints help information" + name: "help" + } + "print-object": { + flag: "--print-object" + short: "-o" + description: """ + Print the (modified) object, instead of the result of the final + expression. + + The same result can be achieved by using `.` as the final expression. + """ + name: "print-object" + } + version: { + flag: "--version" + short: "-V" + description: "Prints version information" + name: "version" + } + } + options: { + input: { + option: "--input" + short: "-i" + description: "File containing the object(s) to manipulate. Leave empty to use stdin." 
+ name: "input" + type: "string" + required: true + } + program: { + option: "--program" + short: "-p" + description: "File containing the program to execute. Can be used instead of `PROGRAM`." + name: "program" + type: "string" + required: true + } + } + args: { + program: { + description: """ + The program to execute. For example, `".foo = true"` sets the object's `foo` + field to `true`. + """ + name: "program" + type: "string" + } + } + } + } +} diff --git a/overlays/projects/github.com/timberio/vector/tests.cue b/overlays/projects/github.com/timberio/vector/tests.cue new file mode 100644 index 0000000..78955a9 --- /dev/null +++ b/overlays/projects/github.com/timberio/vector/tests.cue @@ -0,0 +1,3 @@ +package tests + +Versions: ["v0.3.0-beta.5"] diff --git a/projects/github.com/timberio/vector b/projects/github.com/timberio/vector new file mode 160000 index 0000000..c1b030b --- /dev/null +++ b/projects/github.com/timberio/vector @@ -0,0 +1 @@ +Subproject commit c1b030b93de2c2f3cdfede571167d88f059a41a1