From 618b5c47301b96c50924e32239ad2df415ec0f92 Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Mon, 30 Sep 2024 15:03:36 -0500 Subject: [PATCH 01/42] Add cluster events feature chart and a new github action to test feature charts Signed-off-by: Pete Wall --- .github/workflows/reviewdog.yml | 6 +- .github/workflows/test-feature-charts.yml | 88 +++++++++++++++++++ charts/feature-cluster-events/.helmignore | 6 ++ charts/feature-cluster-events/Chart.lock | 3 + charts/feature-cluster-events/Chart.yaml | 7 ++ charts/feature-cluster-events/Makefile | 29 ++++++ charts/feature-cluster-events/README.md | 30 +++++++ .../templates/_helpers.tpl | 17 ++++ .../templates/_module.alloy.tpl | 23 +++++ .../templates/_notes.tpl | 7 ++ .../templates/configmap.yaml | 11 +++ .../tests/default_test.yaml | 24 +++++ .../tests/extra_processing_stages_test.yaml | 37 ++++++++ .../tests/namespace_test.yaml | 26 ++++++ .../feature-cluster-events/values.schema.json | 24 +++++ charts/feature-cluster-events/values.yaml | 24 +++++ 16 files changed, 361 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/test-feature-charts.yml create mode 100644 charts/feature-cluster-events/.helmignore create mode 100644 charts/feature-cluster-events/Chart.lock create mode 100644 charts/feature-cluster-events/Chart.yaml create mode 100644 charts/feature-cluster-events/Makefile create mode 100644 charts/feature-cluster-events/README.md create mode 100644 charts/feature-cluster-events/templates/_helpers.tpl create mode 100644 charts/feature-cluster-events/templates/_module.alloy.tpl create mode 100644 charts/feature-cluster-events/templates/_notes.tpl create mode 100644 charts/feature-cluster-events/templates/configmap.yaml create mode 100644 charts/feature-cluster-events/tests/default_test.yaml create mode 100644 charts/feature-cluster-events/tests/extra_processing_stages_test.yaml create mode 100644 charts/feature-cluster-events/tests/namespace_test.yaml create mode 100644 
charts/feature-cluster-events/values.schema.json create mode 100644 charts/feature-cluster-events/values.yaml diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 1aa1b3cf3..148b892f2 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -2,9 +2,13 @@ name: ReviewDog # yamllint disable-line rule:truthy on: - pull_request: + push: branches: ["main"] + pull_request: + + workflow_dispatch: + jobs: markdownlint: name: runner / markdownlint diff --git a/.github/workflows/test-feature-charts.yml b/.github/workflows/test-feature-charts.yml new file mode 100644 index 000000000..93531203f --- /dev/null +++ b/.github/workflows/test-feature-charts.yml @@ -0,0 +1,88 @@ +--- +name: Test Feature Chart +# yamllint disable-line rule:truthy +on: + push: + branches: ["main"] + paths: + - 'charts/**' + - '!charts/k8s-monitoring/**' + - '!charts/k8s-monitoring-v1/**' + pull_request: + paths: + - 'charts/**' + - '!charts/k8s-monitoring/**' + - '!charts/k8s-monitoring-v1/**' + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + + +jobs: + detect-changed-charts: + name: Detect Changed Feature Charts + runs-on: ubuntu-latest + outputs: + changed_dirs: ${{ steps.changed_dirs.outputs.changed_dirs }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Detect Changed Subdirectories + id: changed_charts + run: | + changed_charts=$(git diff --name-only HEAD^ HEAD -- 'charts/*' | grep "^charts/" | cut -d "/" -f2 | sort -u) + if [ -z "$changed_charts" ]; then + echo "No changes detected" + changed_charts="none" + fi + echo "::set-output name=changed_charts::$changed_charts" + + run-tests: + name: Run Tests + needs: detect-changed-charts + runs-on: ubuntu-latest + strategy: + matrix: + dir: ${{ fromJson(needs.detect-changes.outputs.changed_charts) }} + fail-fast: false + if: ${{ needs.detect-changes.outputs.changed_charts != 'none' }} + steps: + - name: Checkout code + uses: 
actions/checkout@v3 + + - name: Run Tests in Changed Directories + run: | + echo "Testing ${{ matrix.dir }}" + cd ${{ matrix.dir }} + make test + + check-generated-files: + name: Check Generated Files + needs: detect-changes + runs-on: ubuntu-latest + strategy: + matrix: + dir: ${{ fromJson(needs.detect-changes.outputs.changed_charts) }} + fail-fast: false + if: ${{ needs.detect-changes.outputs.changed_charts != 'none' }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Run make all to regenerate files + run: | + echo "Running make all in charts/${{ matrix.dir }}" + cd charts/${{ matrix.dir }} + make all + + - name: Check for changes in generated files + run: | + cd charts/${{ matrix.dir }} + if [ -n "$(git status --porcelain)" ]; then + echo "Generated files in charts/${{ matrix.dir }} are not up to date. Please run 'make all' and commit the changes." + git diff + exit 1 + else + echo "Generated files in charts/${{ matrix.dir }} are up to date." + fi diff --git a/charts/feature-cluster-events/.helmignore b/charts/feature-cluster-events/.helmignore new file mode 100644 index 000000000..2b29eaf56 --- /dev/null +++ b/charts/feature-cluster-events/.helmignore @@ -0,0 +1,6 @@ +docs +schema-mods +tests +Makefile +README.md +README.md.gotmpl diff --git a/charts/feature-cluster-events/Chart.lock b/charts/feature-cluster-events/Chart.lock new file mode 100644 index 000000000..1e36a5261 --- /dev/null +++ b/charts/feature-cluster-events/Chart.lock @@ -0,0 +1,3 @@ +dependencies: [] +digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726 +generated: "2024-08-21T14:40:45.012164-05:00" diff --git a/charts/feature-cluster-events/Chart.yaml b/charts/feature-cluster-events/Chart.yaml new file mode 100644 index 000000000..046bd4837 --- /dev/null +++ b/charts/feature-cluster-events/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: k8s-monitoring-feature-cluster-events +description: Gathers Kubernetes Events +icon: 
https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg +version: 1.0.0 +appVersion: 1.0.0 +dependencies: [] diff --git a/charts/feature-cluster-events/Makefile b/charts/feature-cluster-events/Makefile new file mode 100644 index 000000000..4a274d3ac --- /dev/null +++ b/charts/feature-cluster-events/Makefile @@ -0,0 +1,29 @@ + HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) + +.SECONDEXPANSION: +README.md: values.yaml Chart.yaml $$(wildcard README.md.gotmpl) +ifdef HAS_HELM_DOCS + helm-docs +else + docker run --rm --volume "$(shell pwd):/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest +endif + +Chart.lock: Chart.yaml + helm dependency update . + touch Chart.lock # Ensure the timestamp is updated + +values.schema.json: values.yaml $$(wildcard schema-mods/*) + ../../utilities/schema-gen.sh . + +.PHONY: all +all: README.md Chart.lock values.schema.json + +.PHONY: test +test: all + helm lint . +ifdef HAS_HELM_UNITTEST + helm unittest . +else + docker run --rm --volume $(shell pwd):/apps helmunittest/helm-unittest . +endif diff --git a/charts/feature-cluster-events/README.md b/charts/feature-cluster-events/README.md new file mode 100644 index 000000000..020411bd1 --- /dev/null +++ b/charts/feature-cluster-events/README.md @@ -0,0 +1,30 @@ +# cluster-events + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) + +Gathers Kubernetes Events + +## Values + +### Processing settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| extraProcessingStages | string | `""` | Stage blocks to be added to the loki.process component for cluster events. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/loki.process/#blocks)) This value is templated so that you can refer to other values from this file. | + +### General settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| fullnameOverride | string | `""` | Full name override | +| nameOverride | string | `""` | Name override | + +### Gather settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| logFormat | string | `"logfmt"` | Log format used to forward cluster events. Allowed values: `logfmt` (default), `json`. | +| namespaces | list | `[]` | List of namespaces to watch for events (`[]` means all namespaces) | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/charts/feature-cluster-events/templates/_helpers.tpl b/charts/feature-cluster-events/templates/_helpers.tpl new file mode 100644 index 000000000..a28241213 --- /dev/null +++ b/charts/feature-cluster-events/templates/_helpers.tpl @@ -0,0 +1,17 @@ +{{/* +Create a default fully qualified name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "feature.clusterEvents.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride | lower }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-events/templates/_module.alloy.tpl b/charts/feature-cluster-events/templates/_module.alloy.tpl new file mode 100644 index 000000000..f53bb8ac7 --- /dev/null +++ b/charts/feature-cluster-events/templates/_module.alloy.tpl @@ -0,0 +1,23 @@ +{{- define "feature.clusterEvents.module" }} +declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "{{ .Values.logFormat }}" + {{- if .Values.namespaces }} + namespaces = {{ .Values.namespaces | toJson }} + {{- end }} +{{- if .Values.extraProcessingStages }} + forward_to = loki.process.cluster_events.receiver + } + + loki.process "cluster_events" { +{{ .Values.extraProcessingStages | indent 4 }} +{{- end }} + forward_to = argument.logs_destinations.value + } +} +{{- end -}} \ No newline at end of file diff --git a/charts/feature-cluster-events/templates/_notes.tpl b/charts/feature-cluster-events/templates/_notes.tpl new file mode 100644 index 000000000..6447c8e4d --- /dev/null +++ b/charts/feature-cluster-events/templates/_notes.tpl @@ -0,0 +1,7 @@ +{{- define "feature.clusterEvents.notes.deployments" }}{{- end }} + +{{- define "feature.clusterEvents.notes.task" }} +Gather Kubernetes Cluster events{{- if .Values.namespaces }} from the namespaces {{ .Values.namespaces | join "," }}{{- end }} +{{- end }} + +{{- define 
"feature.clusterEvents.notes.actions" }}{{- end }} diff --git a/charts/feature-cluster-events/templates/configmap.yaml b/charts/feature-cluster-events/templates/configmap.yaml new file mode 100644 index 000000000..24a5b4f64 --- /dev/null +++ b/charts/feature-cluster-events/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.deployAsConfigMap }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "feature.clusterEvents.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + module.alloy: |- + {{- include "feature.clusterEvents.module" . | indent 4 }} +{{- end }} diff --git a/charts/feature-cluster-events/tests/default_test.yaml b/charts/feature-cluster-events/tests/default_test.yaml new file mode 100644 index 000000000..cbfc3e038 --- /dev/null +++ b/charts/feature-cluster-events/tests/default_test.yaml @@ -0,0 +1,24 @@ +suite: Test default values +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap + set: + deployAsConfigMap: true + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "logfmt" + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml b/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml new file mode 100644 index 000000000..76b7d03b4 --- /dev/null +++ b/charts/feature-cluster-events/tests/extra_processing_stages_test.yaml @@ -0,0 +1,37 @@ +suite: Test extra processing stages +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap + set: + deployAsConfigMap: true + extraProcessingStages: |- + stage.drop { + source = "namespace" + value = "private" + } + asserts: + - isKind: + of: 
ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "logfmt" + forward_to = loki.process.cluster_events.receiver + } + + loki.process "cluster_events" { + stage.drop { + source = "namespace" + value = "private" + } + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-cluster-events/tests/namespace_test.yaml b/charts/feature-cluster-events/tests/namespace_test.yaml new file mode 100644 index 000000000..ed9ef55eb --- /dev/null +++ b/charts/feature-cluster-events/tests/namespace_test.yaml @@ -0,0 +1,26 @@ +suite: Test namespaces +templates: + - configmap.yaml +tests: + - it: should create a ConfigMap + set: + deployAsConfigMap: true + namespaces: ["a", "b"] + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_events" { + argument "logs_destinations" { + comment = "Must be a list of log destinations where collected logs should be forwarded to" + } + + loki.source.kubernetes_events "cluster_events" { + job_name = "integrations/kubernetes/eventhandler" + log_format = "logfmt" + namespaces = ["a","b"] + forward_to = argument.logs_destinations.value + } + } diff --git a/charts/feature-cluster-events/values.schema.json b/charts/feature-cluster-events/values.schema.json new file mode 100644 index 000000000..37c56b68e --- /dev/null +++ b/charts/feature-cluster-events/values.schema.json @@ -0,0 +1,24 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "deployAsConfigMap": { + "type": "boolean" + }, + "extraProcessingStages": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "logFormat": { + "type": "string" + }, + "nameOverride": { + "type": 
"string" + }, + "namespaces": { + "type": "array" + } + } +} diff --git a/charts/feature-cluster-events/values.yaml b/charts/feature-cluster-events/values.yaml new file mode 100644 index 000000000..f5049e6cc --- /dev/null +++ b/charts/feature-cluster-events/values.yaml @@ -0,0 +1,24 @@ +# -- Name override +# @section -- General settings +nameOverride: "" + +# -- Full name override +# @section -- General settings +fullnameOverride: "" + +# -- List of namespaces to watch for events (`[]` means all namespaces) +# @section -- Gather settings +namespaces: [] + +# -- Log format used to forward cluster events. Allowed values: `logfmt` (default), `json`. +# @section -- Gather settings +logFormat: logfmt + +# -- Stage blocks to be added to the loki.process component for cluster events. +# ([docs](https://grafana.com/docs/alloy/latest/reference/components/loki.process/#blocks)) +# This value is templated so that you can refer to other values from this file. +# @section -- Processing settings +extraProcessingStages: "" + +# @ignore +deployAsConfigMap: false From c3aa695584dbc1ab74d38bf4d7517be62babee7b Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Mon, 30 Sep 2024 15:12:36 -0500 Subject: [PATCH 02/42] Update test action Signed-off-by: Pete Wall --- .github/workflows/test-feature-charts.yml | 52 ++++++++++++++++++----- charts/feature-cluster-events/Makefile | 2 +- charts/feature-cluster-events/README.md | 2 +- 3 files changed, 43 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test-feature-charts.yml b/.github/workflows/test-feature-charts.yml index 93531203f..1203a96d5 100644 --- a/.github/workflows/test-feature-charts.yml +++ b/.github/workflows/test-feature-charts.yml @@ -23,20 +23,50 @@ jobs: name: Detect Changed Feature Charts runs-on: ubuntu-latest outputs: - changed_dirs: ${{ steps.changed_dirs.outputs.changed_dirs }} + changed_charts: ${{ steps.changed_charts.outputs.changed_charts }} steps: - name: Checkout code uses: actions/checkout@v3 - - name: 
Detect Changed Subdirectories + - name: Detect Changed Feature Charts id: changed_charts run: | - changed_charts=$(git diff --name-only HEAD^ HEAD -- 'charts/*' | grep "^charts/" | cut -d "/" -f2 | sort -u) + if [ "${{ github.event_name }}" == "pull_request" ]; then + # In pull request, compare against the base branch (upstream) + base_branch="${{ github.event.pull_request.base.ref }}" + echo "Comparing against base branch: $base_branch" + git fetch origin $base_branch + base_commit="origin/$base_branch" + elif [ "${{ github.event_name }}" == "push" ]; then + # In push to main, compare the last commit with HEAD^ + base_commit="HEAD^" + elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then + # In manual trigger, run for all feature charts + echo "Manual dispatch detected, running tests for all feature charts" + echo "changed_charts=$(ls charts | grep -v "k8s-monitoring" | sort -u)" >> ${GITHUB_OUTPUT} + exit 0 + fi + + # Check if base commit exists, fallback to empty tree if none + if ! git rev-parse --verify $base_commit >/dev/null 2>&1; then + base_commit=$(git hash-object -t tree /dev/null) + fi + + # Detect modified files + modified_charts=$(git diff --name-only $base_commit HEAD -- 'charts/*' | grep "^charts/" | cut -d "/" -f2 | sort -u) + + # Detect newly added files (untracked files) + added_charts=$(git ls-files --others --exclude-standard -- 'charts/*' | grep "^charts/" | cut -d "/" -f2 | sort -u) + + # Combine both added and modified charts + changed_charts=$(echo -e "$modified_charts\n$added_charts" | grep -v "k8s-monitoring" | sort -u) + if [ -z "$changed_charts" ]; then - echo "No changes detected" + echo "No changes detected in charts" changed_charts="none" fi - echo "::set-output name=changed_charts::$changed_charts" + echo "Changed feature charts: $changed_charts" + echo "changed_charts=$(echo "$changed_charts" | jq --raw-input --slurp --compact-output 'split("\n") | map(select(. 
!= ""))')" >> ${GITHUB_OUTPUT} run-tests: name: Run Tests @@ -44,9 +74,9 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - dir: ${{ fromJson(needs.detect-changes.outputs.changed_charts) }} + dir: ${{ fromJson(needs.detect-changed-charts.outputs.changed_charts) }} fail-fast: false - if: ${{ needs.detect-changes.outputs.changed_charts != 'none' }} + if: ${{ needs.detect-changed-charts.outputs.changed_charts != 'none' }} steps: - name: Checkout code uses: actions/checkout@v3 @@ -54,18 +84,18 @@ jobs: - name: Run Tests in Changed Directories run: | echo "Testing ${{ matrix.dir }}" - cd ${{ matrix.dir }} + cd charts/${{ matrix.dir }} make test check-generated-files: name: Check Generated Files - needs: detect-changes + needs: detect-changed-charts runs-on: ubuntu-latest strategy: matrix: - dir: ${{ fromJson(needs.detect-changes.outputs.changed_charts) }} + dir: ${{ fromJson(needs.detect-changed-charts.outputs.changed_charts) }} fail-fast: false - if: ${{ needs.detect-changes.outputs.changed_charts != 'none' }} + if: ${{ needs.detect-changed-charts.outputs.changed_charts != 'none' }} steps: - name: Checkout code uses: actions/checkout@v3 diff --git a/charts/feature-cluster-events/Makefile b/charts/feature-cluster-events/Makefile index 4a274d3ac..16266ef07 100644 --- a/charts/feature-cluster-events/Makefile +++ b/charts/feature-cluster-events/Makefile @@ -11,7 +11,7 @@ endif Chart.lock: Chart.yaml helm dependency update . - touch Chart.lock # Ensure the timestamp is updated + @touch Chart.lock # Ensure the timestamp is updated values.schema.json: values.yaml $$(wildcard schema-mods/*) ../../utilities/schema-gen.sh . 
diff --git a/charts/feature-cluster-events/README.md b/charts/feature-cluster-events/README.md index 020411bd1..a84101ebd 100644 --- a/charts/feature-cluster-events/README.md +++ b/charts/feature-cluster-events/README.md @@ -1,4 +1,4 @@ -# cluster-events +# k8s-monitoring-feature-cluster-events ![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) From ada7950072b10f02091fd3550377bd2640e48726 Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Mon, 30 Sep 2024 16:43:07 -0500 Subject: [PATCH 03/42] Add a gitkeep to make the snapshot directory exist Signed-off-by: Pete Wall --- charts/feature-cluster-events/tests/__snapshot__/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 charts/feature-cluster-events/tests/__snapshot__/.gitkeep diff --git a/charts/feature-cluster-events/tests/__snapshot__/.gitkeep b/charts/feature-cluster-events/tests/__snapshot__/.gitkeep new file mode 100644 index 000000000..e69de29bb From f2c55cab2f3d360ced2420b6905dea5d793ae0b2 Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Mon, 30 Sep 2024 16:46:52 -0500 Subject: [PATCH 04/42] Add annotation feature chart Signed-off-by: Pete Wall --- .../.helmignore | 6 + .../Chart.lock | 3 + .../Chart.yaml | 7 + .../feature-annotation-autodiscovery/Makefile | 29 +++ .../README.md | 59 +++++ .../templates/_helpers.tpl | 29 +++ .../templates/_module.alloy.tpl | 232 ++++++++++++++++++ .../templates/_notes.tpl | 7 + .../templates/configmap.yaml | 11 + .../tests/__snapshot__/.gitkeep | 0 .../tests/default_test.yaml | 208 ++++++++++++++++ .../tests/prometheus_annotation_test.yaml | 213 ++++++++++++++++ .../values.schema.json | 89 +++++++ .../values.yaml | 94 +++++++ charts/feature-cluster-events/Makefile | 2 +- 15 files changed, 988 insertions(+), 1 deletion(-) create mode 100644 charts/feature-annotation-autodiscovery/.helmignore create mode 100644 
charts/feature-annotation-autodiscovery/Chart.lock create mode 100644 charts/feature-annotation-autodiscovery/Chart.yaml create mode 100644 charts/feature-annotation-autodiscovery/Makefile create mode 100644 charts/feature-annotation-autodiscovery/README.md create mode 100644 charts/feature-annotation-autodiscovery/templates/_helpers.tpl create mode 100644 charts/feature-annotation-autodiscovery/templates/_module.alloy.tpl create mode 100644 charts/feature-annotation-autodiscovery/templates/_notes.tpl create mode 100644 charts/feature-annotation-autodiscovery/templates/configmap.yaml create mode 100644 charts/feature-annotation-autodiscovery/tests/__snapshot__/.gitkeep create mode 100644 charts/feature-annotation-autodiscovery/tests/default_test.yaml create mode 100644 charts/feature-annotation-autodiscovery/tests/prometheus_annotation_test.yaml create mode 100644 charts/feature-annotation-autodiscovery/values.schema.json create mode 100644 charts/feature-annotation-autodiscovery/values.yaml diff --git a/charts/feature-annotation-autodiscovery/.helmignore b/charts/feature-annotation-autodiscovery/.helmignore new file mode 100644 index 000000000..2b29eaf56 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/.helmignore @@ -0,0 +1,6 @@ +docs +schema-mods +tests +Makefile +README.md +README.md.gotmpl diff --git a/charts/feature-annotation-autodiscovery/Chart.lock b/charts/feature-annotation-autodiscovery/Chart.lock new file mode 100644 index 000000000..b89222ee5 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/Chart.lock @@ -0,0 +1,3 @@ +dependencies: [] +digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726 +generated: "2024-09-25T13:45:54.706765-05:00" diff --git a/charts/feature-annotation-autodiscovery/Chart.yaml b/charts/feature-annotation-autodiscovery/Chart.yaml new file mode 100644 index 000000000..0d04a1d8e --- /dev/null +++ b/charts/feature-annotation-autodiscovery/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: 
k8s-monitoring-feature-annotation-autodiscovery +description: Gathers metrics automatically based on Kubernetes Pod and Service annotations +icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg +version: 1.0.0 +appVersion: 1.0.0 +dependencies: [] diff --git a/charts/feature-annotation-autodiscovery/Makefile b/charts/feature-annotation-autodiscovery/Makefile new file mode 100644 index 000000000..66c31bd1a --- /dev/null +++ b/charts/feature-annotation-autodiscovery/Makefile @@ -0,0 +1,29 @@ +HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) + +.SECONDEXPANSION: +README.md: values.yaml Chart.yaml $$(wildcard README.md.gotmpl) +ifdef HAS_HELM_DOCS + helm-docs +else + docker run --rm --volume "$(shell pwd):/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest +endif + +Chart.lock: Chart.yaml + helm dependency update . + @touch Chart.lock # Ensure the timestamp is updated + +values.schema.json: values.yaml $$(wildcard schema-mods/*) + ../../utilities/schema-gen.sh . + +.PHONY: all +all: README.md Chart.lock values.schema.json + +.PHONY: test +test: all + helm lint . +ifdef HAS_HELM_UNITTEST + helm unittest . +else + docker run --rm --volume $(shell pwd):/apps helmunittest/helm-unittest . 
+endif diff --git a/charts/feature-annotation-autodiscovery/README.md b/charts/feature-annotation-autodiscovery/README.md new file mode 100644 index 000000000..78a096bca --- /dev/null +++ b/charts/feature-annotation-autodiscovery/README.md @@ -0,0 +1,59 @@ +# k8s-monitoring-feature-annotation-autodiscovery + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) + +Gathers metrics automatically based on Kubernetes Pod and Service annotations + +## Values + +### Annotations + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| annotations.instance | string | `"k8s.grafana.com/instance"` | Annotation for overriding the instance label | +| annotations.job | string | `"k8s.grafana.com/job"` | Annotation for overriding the job label | +| annotations.metricsPath | string | `"k8s.grafana.com/metrics.path"` | Annotation for setting or overriding the metrics path. If not set, it defaults to /metrics | +| annotations.metricsPortName | string | `"k8s.grafana.com/metrics.portName"` | Annotation for setting the metrics port by name. | +| annotations.metricsPortNumber | string | `"k8s.grafana.com/metrics.portNumber"` | Annotation for setting the metrics port by number. | +| annotations.metricsScheme | string | `"k8s.grafana.com/metrics.scheme"` | Annotation for setting the metrics scheme, default: http. | +| annotations.metricsScrapeInterval | string | `"k8s.grafana.com/metrics.scrapeInterval"` | Annotation for overriding the scrape interval for this service or pod. Value should be a duration like "15s, 1m". Overrides metrics.autoDiscover.scrapeInterval | +| annotations.scrape | string | `"k8s.grafana.com/scrape"` | Annotation for enabling scraping for this service or pod. 
Value should be either "true" or "false" | + +### Scrape Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| bearerToken | object | `{"enabled":true,"token":"/var/run/secrets/kubernetes.io/serviceaccount/token"}` | Sets bearer_token_file line in the prometheus.scrape annotation_autodiscovery. | +| scrapeInterval | string | 60s | How frequently to scrape metrics from PodMonitor objects. Only used if the PodMonitor does not specify the scrape interval. Overrides global.scrapeInterval | + +### Discovery Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| extraDiscoveryRules | string | `""` | Rule blocks to be added to the prometheus.operator.podmonitors component for PodMonitors. These relabeling rules are applied pre-scrape against the targets from service discovery. The relabelings defined in the PodMonitor object are applied first, then these relabelings are applied. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | + +### Metric Processing Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for PodMonitor objects. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | + +### General settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| fullnameOverride | string | `""` | Full name override | +| nameOverride | string | `""` | Name override | + +### Global Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.maxCacheSize | int | `100000` | Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) This should be at least 2x-5x your largest scrape target or samples appended rate. | +| global.scrapeInterval | string | `"60s"` | How frequently to scrape metrics. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/charts/feature-annotation-autodiscovery/templates/_helpers.tpl b/charts/feature-annotation-autodiscovery/templates/_helpers.tpl new file mode 100644 index 000000000..b42d94195 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/templates/_helpers.tpl @@ -0,0 +1,29 @@ +{{/* +Create a default fully qualified name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "feature.annotationAutodiscovery.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride | lower }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "escape_annotation" -}} +{{ . | replace "-" "_" | replace "." "_" | replace "/" "_" }} +{{- end }} + +{{- define "pod_annotation" -}} +{{ printf "__meta_kubernetes_pod_annotation_%s" (include "escape_annotation" .) }} +{{- end }} + +{{- define "service_annotation" -}} +{{ printf "__meta_kubernetes_service_annotation_%s" (include "escape_annotation" .) }} +{{- end }} diff --git a/charts/feature-annotation-autodiscovery/templates/_module.alloy.tpl b/charts/feature-annotation-autodiscovery/templates/_module.alloy.tpl new file mode 100644 index 000000000..ada62d60b --- /dev/null +++ b/charts/feature-annotation-autodiscovery/templates/_module.alloy.tpl @@ -0,0 +1,232 @@ +{{- define "feature.annotationAutodiscovery.module" }} +declare "annotation_autodiscovery" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.relabel "annotation_autodiscovery_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.scrape }}"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.job }}"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.instance }}"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["{{ include 
"pod_annotation" .Values.annotations.metricsPath }}"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the pod port + // The discovery generates a target for each declared container port of the pod. + // If the metricsPortName annotation has value, keep only the target where the port name matches the one of the annotation. + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortName }}"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + // If the metrics port number annotation has a value, override the target address to use it, regardless whether it is + // one of the declared ports on that Pod. + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortNumber }}", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})" + replacement = "[$2]:$1" // IPv6 + target_label = "__address__" + } + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsPortNumber }}", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);((([0-9]+?)(\\.|$)){4})" // IPv4, takes priority over IPv6 when both exists + replacement = "$2:$1" + target_label = "__address__" + } + + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsScheme }}"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["{{ include "pod_annotation" .Values.annotations.metricsScrapeInterval }}"] + action = "replace" + target_label = "__scrape_interval__" + } +{{- if .Values.extraDiscoveryRules }} +{{ .Values.extraDiscoveryRules | indent 4 }} +{{- end }} + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.relabel "annotation_autodiscovery_services" { + targets 
= discovery.kubernetes.services.targets + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.scrape }}"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.job }}"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.instance }}"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPath }}"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the service port + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPortName }}"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + target_label = "__tmp_port" + } + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.metricsPortNumber }}"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.metricsScheme }}"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["{{ include "service_annotation" .Values.annotations.metricsScrapeInterval }}"] + action = "replace" + target_label = "__scrape_interval__" + } +{{- if .Values.extraDiscoveryRules }} +{{ .Values.extraDiscoveryRules | indent 4 }} +{{- end }} + } + + discovery.relabel "annotation_autodiscovery_http" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, 
discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "drop" + } + } + + discovery.relabel "annotation_autodiscovery_https" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "keep" + } + } + + prometheus.scrape "annotation_autodiscovery_http" { + targets = discovery.relabel.annotation_autodiscovery_http.output + scrape_interval = {{ .Values.scrapeInterval | default .Values.global.scrapeInterval | quote }} + honor_labels = true +{{- if .Values.bearerToken.enabled }} + bearer_token_file = {{ .Values.bearerToken.token | quote }} +{{- end }} + clustering { + enabled = true + } +{{ if or .Values.metricsTuning.includeMetrics .Values.metricsTuning.excludeMetrics .Values.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.annotation_autodiscovery.receiver] +{{- else }} + forward_to = argument.metrics_destinations.value +{{- end }} + } + + prometheus.scrape "annotation_autodiscovery_https" { + targets = discovery.relabel.annotation_autodiscovery_https.output + scrape_interval = {{ .Values.scrapeInterval | default .Values.global.scrapeInterval | quote }} + honor_labels = true +{{- if .Values.bearerToken.enabled }} + bearer_token_file = {{ .Values.bearerToken.token | quote }} +{{- end }} + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } +{{ if or .Values.metricsTuning.includeMetrics .Values.metricsTuning.excludeMetrics .Values.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.annotation_autodiscovery.receiver] + } + + prometheus.relabel "annotation_autodiscovery" { + max_cache_size = {{ .Values.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if .Values.metricsTuning.includeMetrics }} + rule { + source_labels = ["__name__"] + regex = "up|{{ join "|" 
.Values.metricsTuning.includeMetrics }}" + action = "keep" + } +{{- end }} +{{- if .Values.metricsTuning.excludeMetrics }} + rule { + source_labels = ["__name__"] + regex = {{ join "|" .Values.metricsTuning.excludeMetrics | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.extraMetricProcessingRules }} +{{ .Values.extraMetricProcessingRules | indent 4 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value + } +} +{{- end -}} diff --git a/charts/feature-annotation-autodiscovery/templates/_notes.tpl b/charts/feature-annotation-autodiscovery/templates/_notes.tpl new file mode 100644 index 000000000..6aa7d11ec --- /dev/null +++ b/charts/feature-annotation-autodiscovery/templates/_notes.tpl @@ -0,0 +1,7 @@ +{{- define "feature.annotationAutodiscovery.notes.deployments" }}{{- end }} + +{{- define "feature.annotationAutodiscovery.notes.task" }} +Scrape metrics from pods and services with the "{{.Values.annotations.scrape}}: true" annotation +{{- end }} + +{{- define "feature.annotationAutodiscovery.notes.actions" }}{{- end }} diff --git a/charts/feature-annotation-autodiscovery/templates/configmap.yaml b/charts/feature-annotation-autodiscovery/templates/configmap.yaml new file mode 100644 index 000000000..cf16bfba7 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.deployAsConfigMap }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "feature.annotationAutodiscovery.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + module.alloy: |- + {{- include "feature.annotationAutodiscovery.module" . 
| indent 4 }} +{{- end }} diff --git a/charts/feature-annotation-autodiscovery/tests/__snapshot__/.gitkeep b/charts/feature-annotation-autodiscovery/tests/__snapshot__/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/charts/feature-annotation-autodiscovery/tests/default_test.yaml b/charts/feature-annotation-autodiscovery/tests/default_test.yaml new file mode 100644 index 000000000..8ee4f8d36 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/tests/default_test.yaml @@ -0,0 +1,208 @@ +suite: Test default values +templates: +- configmap.yaml +tests: +- it: creates a module with default discovery, scraping, and processing configurations + set: + deployAsConfigMap: true + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "annotation_autodiscovery" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.relabel "annotation_autodiscovery_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the pod port + // The discovery generates a target for each declared container port of the pod. + // If the metricsPortName annotation has value, keep only the target where the port name matches the one of the annotation. 
+ rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + // If the metrics port number annotation has a value, override the target address to use it, regardless whether it is + // one of the declared ports on that Pod. + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})" + replacement = "[$2]:$1" // IPv6 + target_label = "__address__" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);((([0-9]+?)(\\.|$)){4})" // IPv4, takes priority over IPv6 when both exists + replacement = "$2:$1" + target_label = "__address__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.relabel "annotation_autodiscovery_services" { + targets = discovery.kubernetes.services.targets + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } 
+ rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the service port + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portNumber"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.relabel "annotation_autodiscovery_http" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "drop" + } + } + + discovery.relabel "annotation_autodiscovery_https" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "keep" + } + } + + prometheus.scrape "annotation_autodiscovery_http" { + targets = discovery.relabel.annotation_autodiscovery_http.output + scrape_interval = "60s" + 
honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + clustering { + enabled = true + } + + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "annotation_autodiscovery_https" { + targets = discovery.relabel.annotation_autodiscovery_https.output + scrape_interval = "60s" + honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } + + forward_to = argument.metrics_destinations.value + } + } + diff --git a/charts/feature-annotation-autodiscovery/tests/prometheus_annotation_test.yaml b/charts/feature-annotation-autodiscovery/tests/prometheus_annotation_test.yaml new file mode 100644 index 000000000..45914ccf3 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/tests/prometheus_annotation_test.yaml @@ -0,0 +1,213 @@ +suite: Test with prometheus.io annotations +templates: +- configmap.yaml +tests: +- it: creates a module with default discovery, scraping, and processing configurations + set: + deployAsConfigMap: true + annotations: + scrape: prometheus.io/scrape + metricsScheme: prometheus.io/scheme + metricsPath: prometheus.io/path + metricsPort: prometheus.io/port + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "annotation_autodiscovery" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + + discovery.kubernetes "pods" { + role = "pod" + } + + discovery.relabel "annotation_autodiscovery_pods" { + targets = discovery.kubernetes.pods.targets + rule { + source_labels = ["__meta_kubernetes_pod_annotation_prometheus_io_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = 
["__meta_kubernetes_pod_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_prometheus_io_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the pod port + // The discovery generates a target for each declared container port of the pod. + // If the metricsPortName annotation has value, keep only the target where the port name matches the one of the annotation. + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_pod_container_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + // If the metrics port number annotation has a value, override the target address to use it, regardless whether it is + // one of the declared ports on that Pod. 
+ rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})" + replacement = "[$2]:$1" // IPv6 + target_label = "__address__" + } + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_portNumber", "__meta_kubernetes_pod_ip"] + regex = "(\\d+);((([0-9]+?)(\\.|$)){4})" // IPv4, takes priority over IPv6 when both exists + replacement = "$2:$1" + target_label = "__address__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_prometheus_io_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_pod_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.kubernetes "services" { + role = "service" + } + + discovery.relabel "annotation_autodiscovery_services" { + targets = discovery.kubernetes.services.targets + rule { + source_labels = ["__meta_kubernetes_service_annotation_prometheus_io_scrape"] + regex = "true" + action = "keep" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_job"] + action = "replace" + target_label = "job" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_instance"] + action = "replace" + target_label = "instance" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_prometheus_io_path"] + action = "replace" + target_label = "__metrics_path__" + } + + // Choose the service port + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portName"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_name"] + action = "keepequal" + target_label = "__tmp_port" + } + + 
rule { + source_labels = ["__meta_kubernetes_service_port_number"] + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_portNumber"] + regex = "(.+)" + target_label = "__tmp_port" + } + rule { + source_labels = ["__meta_kubernetes_service_port_number"] + action = "keepequal" + target_label = "__tmp_port" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_prometheus_io_scheme"] + action = "replace" + target_label = "__scheme__" + } + + rule { + source_labels = ["__meta_kubernetes_service_annotation_k8s_grafana_com_metrics_scrapeInterval"] + action = "replace" + target_label = "__scrape_interval__" + } + } + + discovery.relabel "annotation_autodiscovery_http" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "drop" + } + } + + discovery.relabel "annotation_autodiscovery_https" { + targets = concat(discovery.relabel.annotation_autodiscovery_pods.output, discovery.relabel.annotation_autodiscovery_services.output) + rule { + source_labels = ["__scheme__"] + regex = "https" + action = "keep" + } + } + + prometheus.scrape "annotation_autodiscovery_http" { + targets = discovery.relabel.annotation_autodiscovery_http.output + scrape_interval = "60s" + honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + clustering { + enabled = true + } + + forward_to = argument.metrics_destinations.value + } + + prometheus.scrape "annotation_autodiscovery_https" { + targets = discovery.relabel.annotation_autodiscovery_https.output + scrape_interval = "60s" + honor_labels = true + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } + + forward_to = argument.metrics_destinations.value + } + } + 
diff --git a/charts/feature-annotation-autodiscovery/values.schema.json b/charts/feature-annotation-autodiscovery/values.schema.json new file mode 100644 index 000000000..096f562db --- /dev/null +++ b/charts/feature-annotation-autodiscovery/values.schema.json @@ -0,0 +1,89 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "annotations": { + "type": "object", + "properties": { + "instance": { + "type": "string" + }, + "job": { + "type": "string" + }, + "metricsPath": { + "type": "string" + }, + "metricsPortName": { + "type": "string" + }, + "metricsPortNumber": { + "type": "string" + }, + "metricsScheme": { + "type": "string" + }, + "metricsScrapeInterval": { + "type": "string" + }, + "scrape": { + "type": "string" + } + } + }, + "bearerToken": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "token": { + "type": "string" + } + } + }, + "deployAsConfigMap": { + "type": "boolean" + }, + "extraDiscoveryRules": { + "type": "string" + }, + "extraMetricProcessingRules": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "global": { + "type": "object", + "properties": { + "maxCacheSize": { + "type": "integer" + }, + "scrapeInterval": { + "type": "string" + } + } + }, + "maxCacheSize": { + "type": "null" + }, + "metricsTuning": { + "type": "object", + "properties": { + "excludeMetrics": { + "type": "array" + }, + "includeMetrics": { + "type": "array" + } + } + }, + "nameOverride": { + "type": "string" + }, + "scrapeInterval": { + "type": "string" + } + } +} diff --git a/charts/feature-annotation-autodiscovery/values.yaml b/charts/feature-annotation-autodiscovery/values.yaml new file mode 100644 index 000000000..6d56bbb10 --- /dev/null +++ b/charts/feature-annotation-autodiscovery/values.yaml @@ -0,0 +1,94 @@ + +# -- Name override +# @section -- General settings +nameOverride: "" + +# -- Full name override +# @section -- General settings +fullnameOverride: "" + +global: + # 
-- How frequently to scrape metrics.
+  # @section -- Global Settings
+  scrapeInterval: 60s
+
+  # -- Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments))
+  # This should be at least 2x-5x your largest scrape target or samples appended rate.
+  # @section -- Global Settings
+  maxCacheSize: 100000
+
+# Annotations that are used to discover and configure metric scraping targets. Add these annotations
+# to your services or pods to control how autodiscovery will find and scrape metrics from your service or pod.
+annotations:
+  # -- Annotation for enabling scraping for this service or pod. Value should be either "true" or "false"
+  # @section -- Annotations
+  scrape: "k8s.grafana.com/scrape"
+  # -- Annotation for overriding the job label
+  # @section -- Annotations
+  job: "k8s.grafana.com/job"
+  # -- Annotation for overriding the instance label
+  # @section -- Annotations
+  instance: "k8s.grafana.com/instance"
+  # -- Annotation for setting or overriding the metrics path. If not set, it defaults to /metrics
+  # @section -- Annotations
+  metricsPath: "k8s.grafana.com/metrics.path"
+  # -- Annotation for setting the metrics port by name.
+  # @section -- Annotations
+  metricsPortName: "k8s.grafana.com/metrics.portName"
+  # -- Annotation for setting the metrics port by number.
+  # @section -- Annotations
+  metricsPortNumber: "k8s.grafana.com/metrics.portNumber"
+  # -- Annotation for setting the metrics scheme, default: http.
+  # @section -- Annotations
+  metricsScheme: "k8s.grafana.com/metrics.scheme"
+  # -- Annotation for overriding the scrape interval for this service or pod. Value should be a duration like "15s, 1m".
+  # Overrides metrics.autoDiscover.scrapeInterval
+  # @section -- Annotations
+  metricsScrapeInterval: "k8s.grafana.com/metrics.scrapeInterval"
+
+# -- Rule blocks to be added to the discovery.relabel components for pods and services. 
+# These relabeling rules are applied pre-scrape against the targets from service discovery.
+# The built-in annotation discovery rules are applied first, then these relabelings are applied.
+# Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped.
+# ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block))
+# @section -- Discovery Settings
+extraDiscoveryRules: ""
+
+# -- How frequently to scrape metrics from discovered pods and services. Can be overridden by the scrape interval annotation on the target.
+# Overrides global.scrapeInterval
+# @default -- 60s
+# @section -- Scrape Settings
+scrapeInterval: ""
+
+# Adjustments to the scraped metrics to filter the amount of metrics sent to storage.
+# @section -- Metric Processing Settings
+metricsTuning:
+  # -- Metrics to keep. Can use regular expressions.
+  # @section -- Metric Processing Settings
+  includeMetrics: []
+  # -- Metrics to drop. Can use regular expressions.
+  # @section -- Metric Processing Settings
+  excludeMetrics: []
+
+# -- Rule blocks to be added to the prometheus.relabel component for annotation autodiscovery.
+# These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present.
+# ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block))
+# @section -- Metric Processing Settings
+extraMetricProcessingRules: ""
+
+# -- Sets the max_cache_size for the annotation autodiscovery prometheus.relabel component.
+# This should be at least 2x-5x your largest scrape target or samples appended rate.
+# ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments))
+# Overrides global.maxCacheSize
+# @raw
+# @section -- Metric Processing Settings
+maxCacheSize:
+
+# -- Sets bearer_token_file line in the prometheus.scrape annotation_autodiscovery. 
+# @section -- Scrape Settings +bearerToken: + enabled: true + token: /var/run/secrets/kubernetes.io/serviceaccount/token + +# @ignore +deployAsConfigMap: false diff --git a/charts/feature-cluster-events/Makefile b/charts/feature-cluster-events/Makefile index 16266ef07..66c31bd1a 100644 --- a/charts/feature-cluster-events/Makefile +++ b/charts/feature-cluster-events/Makefile @@ -1,4 +1,4 @@ - HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_DOCS := $(shell command -v helm-docs;) HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) .SECONDEXPANSION: From 0d2302569c4ffb3253e6631958568bd6f5fa7a94 Mon Sep 17 00:00:00 2001 From: Pete Wall Date: Mon, 30 Sep 2024 17:00:54 -0500 Subject: [PATCH 05/42] Add cluster metrics feature Signed-off-by: Pete Wall --- .github/workflows/test-feature-charts.yml | 6 +- charts/feature-cluster-metrics/.helmignore | 6 + charts/feature-cluster-metrics/Chart.lock | 15 + charts/feature-cluster-metrics/Chart.yaml | 28 + charts/feature-cluster-metrics/Makefile | 29 + charts/feature-cluster-metrics/README.md | 316 +++++++++ .../feature-cluster-metrics/README.md.gotmpl | 139 ++++ .../charts/kepler-0.5.9.tgz | Bin 0 -> 5066 bytes .../charts/kube-state-metrics-5.25.1.tgz | Bin 0 -> 14234 bytes .../prometheus-node-exporter-4.39.0.tgz | Bin 0 -> 14202 bytes .../prometheus-windows-exporter-0.5.1.tgz | Bin 0 -> 8145 bytes .../default-allow-lists/cadvisor.yaml | 20 + .../default-allow-lists/kepler.yaml | 3 + .../kube-state-metrics.yaml | 30 + .../default-allow-lists/kubelet.yaml | 34 + .../node-exporter-integration.yaml | 111 ++++ .../default-allow-lists/node-exporter.yaml | 12 + .../default-allow-lists/opencost.yaml | 27 + .../default-allow-lists/windows-exporter.yaml | 7 + .../schema-mods/remove-subchart-fields.jq | 1 + .../schema-mods/types-and-enums.json | 5 + .../templates/_api_server.alloy.tpl | 33 + .../templates/_cadvisor.alloy.tpl | 109 ++++ .../templates/_helpers.tpl | 17 + .../templates/_kepler.alloy.tpl | 77 
+++ .../_kube_controller_manager.alloy.tpl | 68 ++ .../templates/_kube_proxy.alloy.tpl | 64 ++ .../templates/_kube_scheduler.alloy.tpl | 68 ++ .../templates/_kube_state_metrics.alloy.tpl | 57 ++ .../templates/_kubelet.alloy.tpl | 38 ++ .../templates/_module.alloy.tpl | 26 + .../templates/_node_exporter.alloy.tpl | 60 ++ .../templates/_notes.tpl | 20 + .../templates/_windows_exporter.alloy.tpl | 60 ++ .../templates/configmap.yaml | 11 + .../openshift/kepler-scc.yaml | 66 ++ .../tests/__snapshot__/.gitkeep | 0 .../tests/default_test.yaml | 182 ++++++ .../tests/openshift_test.yaml | 33 + .../values.schema.json | 585 +++++++++++++++++ charts/feature-cluster-metrics/values.yaml | 602 ++++++++++++++++++ 41 files changed, 2962 insertions(+), 3 deletions(-) create mode 100644 charts/feature-cluster-metrics/.helmignore create mode 100644 charts/feature-cluster-metrics/Chart.lock create mode 100644 charts/feature-cluster-metrics/Chart.yaml create mode 100644 charts/feature-cluster-metrics/Makefile create mode 100644 charts/feature-cluster-metrics/README.md create mode 100644 charts/feature-cluster-metrics/README.md.gotmpl create mode 100644 charts/feature-cluster-metrics/charts/kepler-0.5.9.tgz create mode 100644 charts/feature-cluster-metrics/charts/kube-state-metrics-5.25.1.tgz create mode 100644 charts/feature-cluster-metrics/charts/prometheus-node-exporter-4.39.0.tgz create mode 100644 charts/feature-cluster-metrics/charts/prometheus-windows-exporter-0.5.1.tgz create mode 100644 charts/feature-cluster-metrics/default-allow-lists/cadvisor.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/kepler.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/kube-state-metrics.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/kubelet.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/node-exporter-integration.yaml create mode 100644 
charts/feature-cluster-metrics/default-allow-lists/node-exporter.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/opencost.yaml create mode 100644 charts/feature-cluster-metrics/default-allow-lists/windows-exporter.yaml create mode 100644 charts/feature-cluster-metrics/schema-mods/remove-subchart-fields.jq create mode 100644 charts/feature-cluster-metrics/schema-mods/types-and-enums.json create mode 100644 charts/feature-cluster-metrics/templates/_api_server.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_cadvisor.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_helpers.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kepler.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_module.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/_notes.tpl create mode 100644 charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl create mode 100644 charts/feature-cluster-metrics/templates/configmap.yaml create mode 100644 charts/feature-cluster-metrics/templates/platform_specific/openshift/kepler-scc.yaml create mode 100644 charts/feature-cluster-metrics/tests/__snapshot__/.gitkeep create mode 100644 charts/feature-cluster-metrics/tests/default_test.yaml create mode 100644 charts/feature-cluster-metrics/tests/openshift_test.yaml create mode 100644 charts/feature-cluster-metrics/values.schema.json create mode 100644 
charts/feature-cluster-metrics/values.yaml diff --git a/.github/workflows/test-feature-charts.yml b/.github/workflows/test-feature-charts.yml index 1203a96d5..237aabcc0 100644 --- a/.github/workflows/test-feature-charts.yml +++ b/.github/workflows/test-feature-charts.yml @@ -69,7 +69,7 @@ jobs: echo "changed_charts=$(echo "$changed_charts" | jq --raw-input --slurp --compact-output 'split("\n") | map(select(. != ""))')" >> ${GITHUB_OUTPUT} run-tests: - name: Run Tests + name: Testing needs: detect-changed-charts runs-on: ubuntu-latest strategy: @@ -81,7 +81,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 - - name: Run Tests in Changed Directories + - name: Run tests run: | echo "Testing ${{ matrix.dir }}" cd charts/${{ matrix.dir }} @@ -100,7 +100,7 @@ jobs: - name: Checkout code uses: actions/checkout@v3 - - name: Run make all to regenerate files + - name: Regenerate files run: | echo "Running make all in charts/${{ matrix.dir }}" cd charts/${{ matrix.dir }} diff --git a/charts/feature-cluster-metrics/.helmignore b/charts/feature-cluster-metrics/.helmignore new file mode 100644 index 000000000..2b29eaf56 --- /dev/null +++ b/charts/feature-cluster-metrics/.helmignore @@ -0,0 +1,6 @@ +docs +schema-mods +tests +Makefile +README.md +README.md.gotmpl diff --git a/charts/feature-cluster-metrics/Chart.lock b/charts/feature-cluster-metrics/Chart.lock new file mode 100644 index 000000000..167ef4241 --- /dev/null +++ b/charts/feature-cluster-metrics/Chart.lock @@ -0,0 +1,15 @@ +dependencies: +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.25.1 +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.39.0 +- name: prometheus-windows-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 0.5.1 +- name: kepler + repository: https://sustainable-computing-io.github.io/kepler-helm-chart + version: 0.5.9 +digest: 
sha256:a4fcfa6c94a3443665921ad79e766dccb8439d9c3d2f4477bc76cd7e216e3146 +generated: "2024-09-26T16:46:00.295162-05:00" diff --git a/charts/feature-cluster-metrics/Chart.yaml b/charts/feature-cluster-metrics/Chart.yaml new file mode 100644 index 000000000..85aa77927 --- /dev/null +++ b/charts/feature-cluster-metrics/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +name: k8s-monitoring-feature-cluster-metrics +description: Gathers Kubernetes Cluster metrics +icon: https://raw.githubusercontent.com/grafana/grafana/main/public/img/grafana_icon.svg +version: 1.0.0 +appVersion: 1.0.0 +dependencies: + - name: kube-state-metrics + version: 5.25.1 + repository: https://prometheus-community.github.io/helm-charts + condition: kube-state-metrics.deploy + + - alias: node-exporter + name: prometheus-node-exporter + version: 4.39.0 + repository: https://prometheus-community.github.io/helm-charts + condition: node-exporter.deploy + + - alias: windows-exporter + name: prometheus-windows-exporter + version: 0.5.1 + repository: https://prometheus-community.github.io/helm-charts + condition: windows-exporter.deploy + + - name: kepler + version: 0.5.9 + repository: https://sustainable-computing-io.github.io/kepler-helm-chart + condition: kepler.enabled diff --git a/charts/feature-cluster-metrics/Makefile b/charts/feature-cluster-metrics/Makefile new file mode 100644 index 000000000..66c31bd1a --- /dev/null +++ b/charts/feature-cluster-metrics/Makefile @@ -0,0 +1,29 @@ +HAS_HELM_DOCS := $(shell command -v helm-docs;) +HAS_HELM_UNITTEST := $(shell helm plugin list | grep unittest 2> /dev/null) + +.SECONDEXPANSION: +README.md: values.yaml Chart.yaml $$(wildcard README.md.gotmpl) +ifdef HAS_HELM_DOCS + helm-docs +else + docker run --rm --volume "$(shell pwd):/helm-docs" -u $(shell id -u) jnorwood/helm-docs:latest +endif + +Chart.lock: Chart.yaml + helm dependency update . 
+ @touch Chart.lock # Ensure the timestamp is updated + +values.schema.json: values.yaml $$(wildcard schema-mods/*) + ../../utilities/schema-gen.sh . + +.PHONY: all +all: README.md Chart.lock values.schema.json + +.PHONY: test +test: all + helm lint . +ifdef HAS_HELM_UNITTEST + helm unittest . +else + docker run --rm --volume $(shell pwd):/apps helmunittest/helm-unittest . +endif diff --git a/charts/feature-cluster-metrics/README.md b/charts/feature-cluster-metrics/README.md new file mode 100644 index 000000000..35de398b4 --- /dev/null +++ b/charts/feature-cluster-metrics/README.md @@ -0,0 +1,316 @@ + + +# k8s-monitoring-feature-cluster-metrics + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) + +Gathers Kubernetes Cluster metrics + +This chart deploys the Cluster Metrics feature of the Kubernetes Observability Helm chart. It includes the ability to +collect metrics from the Kubernetes Cluster itself, from sources like the Kubelet and cAdvisor, from common supporting +services like [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) and +[Node Exporter](https://github.com/prometheus/node_exporter), and from systems to capture additional data like Kepler. + +## Metric systems + +The Cluster Metrics feature of the Kubernetes Observability Helm chart includes the following metric systems: + +* Kubelet +* cAdvisor +* API Server +* Kube Controller Manager +* Kube Proxy +* Kube Scheduler +* kube-state-metrics +* Node Exporter +* Windows Exporter +* Kepler + +### Kubelet + +Kubelet metrics gather Kubernetes information on each node. + +The kubelet metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/kubelet.yaml](./default-allow-lists/kubelet.yaml). 
+ +### cAdvisor + +[cAdvisor](https://github.com/google/cadvisor) metrics gather information about containers on each node. + +The cAdvisor metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/cadvisor.yaml](./default-allow-lists/cadvisor.yaml). + +### API Server + +API Server metrics gather information about the Kubernetes API Server. + +### Kube Controller Manager + +Kube Controller Manager metrics gather information about the Kubernetes Controller Manager. + +### Kube Proxy + +Kube Proxy metrics gather information about the Kubernetes Proxy. + +### Kube Scheduler + +Kube Scheduler metrics gather information about the Kubernetes Scheduler. + +### kube-state-metrics + +[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) metrics gather information about Kubernetes +resources inside the cluster. + +The kube-state-metrics metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/kube-state-metrics.yaml](./default-allow-lists/kube-state-metrics.yaml). + +### Node Exporter + +[Node Exporter](https://github.com/prometheus/node_exporter) metrics gather information about Linux Kubernetes Nodes. + +The Node Exporter metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/node-exporter.yaml](./default-allow-lists/node-exporter.yaml), and has an integration allow list, +[default-allow-lists/node-exporter-integration.yaml](./default-allow-lists/node-exporter-integration.yaml). + +### Windows Exporter + +[Windows Exporter](https://github.com/prometheus-community/windows_exporter) metrics gather information about Windows +Kubernetes Nodes. + +The Windows Exporter metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/windows-exporter.yaml](./default-allow-lists/windows-exporter.yaml). + +### Kepler + +[Kepler](https://sustainable-computing.io/) metrics gather information about the Kubernetes cluster. 
+ +The Kepler metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/kepler.yaml](./default-allow-lists/kepler.yaml). + +## Metrics Tuning & Allow Lists + +All metric sources have the ability to adjust the amount of metrics being scraped and their labels. This can be useful +to limit the number of metrics delivered to your destinations. Many of the metric sources also have an allow list, which +is a set of metric names that will be kept, while any metrics not on the list will be dropped. The allow lists are tuned +to return a useful, but minimal set of metrics for typical use cases. Some sources have an "integration allow list", +which contains even more metrics for diving into the details of the source itself. + +To control these settings, use the `metricsTuning` section in the values file. + +```yaml +: + metricsTuning: + useDefaultAllowList: # Use the allow list for this metric source + useIntegrationAllowList: # Use the integration allow list for this metric source + includeMetrics: [] # Metrics to be kept + excludeMetrics: [] # Metrics to be dropped +``` + +The behavior of the combination of these settings is shown in this table: + +| Allow List | includeMetrics | excludeMetrics | Result | +|------------|------------------|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| true | `[]` | `[]` | Use the allow list metric list | +| false | `[]` | `[]` | No filter, keep all metrics | +| true | `[my_metric]` | `[]` | Use the allow list metric list with an additional metric | +| false | `[my_metric_.*]` | `[]` | *Only* keep metrics that start with `my_metric_` | +| true | `[]` | `[my_metric_.*]` | Use the allow list metric filter, but exclude anything that starts with `my_metric_` | +| false | `[]` | `[my_metric_.*]` | Keep all metrics except anything that starts with `my_metric_` | +| true | `[my_metric_.*]` | 
`[other_metric_.*]` | Use the allow list metric filter, and keep anything that starts with `my_metric_`, but remove anything that starts with `other_metric_` | +| false | `[my_metric_.*]` | `[my_metric_not_needed]` | *Only* keep metrics that start with `my_metric_`, but remove any that are named `my_metric_not_needed` | + +In addition to all of this, you can also use the `extraMetricProcessingRules` section to add arbitrary relabeling rules that can be used to take any +action on the metric list, including filtering based on label or other actions. + + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://prometheus-community.github.io/helm-charts | kube-state-metrics | 5.25.1 | +| https://prometheus-community.github.io/helm-charts | node-exporter(prometheus-node-exporter) | 4.39.0 | +| https://prometheus-community.github.io/helm-charts | windows-exporter(prometheus-windows-exporter) | 0.5.1 | +| https://sustainable-computing-io.github.io/kepler-helm-chart | kepler | 0.5.9 | + + +## Values + +### API Server + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| apiServer.enabled | string | false | Scrape metrics from the API Server | +| apiServer.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the API Server. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| apiServer.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the API Server. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| apiServer.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | +| apiServer.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| apiServer.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | +| apiServer.scrapeInterval | string | 60s | How frequently to scrape metrics from the API Server Overrides metrics.scrapeInterval | + +### cAdvisor + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| cadvisor.enabled | bool | `true` | Scrape metrics from cAdvisor. | +| cadvisor.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for cAdvisor entities. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| cadvisor.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for cAdvisor metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| cadvisor.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| cadvisor.metricsTuning.dropEmptyContainerLabels | bool | `true` | Drop metrics that have an empty container label | +| cadvisor.metricsTuning.dropEmptyImageLabels | bool | `true` | Drop metrics that have an empty image label | +| cadvisor.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| cadvisor.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| cadvisor.metricsTuning.keepPhysicalFilesystemDevices | list | `["mmcblk.p.+","nvme.+","rbd.+","sd.+","vd.+","xvd.+","dasd.+"]` | Only keep filesystem metrics that use the following physical devices | +| cadvisor.metricsTuning.keepPhysicalNetworkDevices | list | `["en[ospx][0-9].*","wlan[0-9].*","eth[0-9].*"]` | Only keep network metrics that use the following physical devices | +| cadvisor.metricsTuning.normalizeUnnecessaryLabels | list | `[{"labels":["boot_id","system_uuid"],"metric":"machine_memory_bytes"}]` | Normalize labels to the same value for the given metric and label pairs | +| cadvisor.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from cAdvisor to the minimal set required for Kubernetes Monitoring. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | + +### cadvisor + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| cadvisor.scrapeInterval | string | `60s` | How frequently to scrape cAdvisor metrics. | + +### Control Plane + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| controlPlane.enabled | bool | `false` | enable all Kubernetes Control Plane metrics sources. This includes api-server, kube-scheduler, kube-controller-manager, and etcd. 
| + +### General settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| fullnameOverride | string | `""` | Full name override | +| nameOverride | string | `""` | Name override | + +### Global Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.maxCacheSize | int | `100000` | Sets the max_cache_size for every prometheus.relabel component. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) This should be at least 2x-5x your largest scrape target or samples appended rate. | +| global.platform | string | `""` | The specific platform for this cluster. Will enable compatibility for some platforms. Supported options: (empty) or "openshift". | +| global.scrapeInterval | string | `"60s"` | How frequently to scrape metrics. | + +### Kepler + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kepler.enabled | bool | `false` | Deploy and scrape Kepler metrics. | +| kepler.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Kepler. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with __ (i.e. __meta_kubernetes*) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| kepler.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Kepler. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no __meta* labels are present. 
| +| kepler.labelMatchers | object | `{"app.kubernetes.io/name":"kepler"}` | Label matchers used to select the Kepler pods | +| kepler.maxCacheSize | string | `100000` | Sets the max_cache_size for the prometheus.relabel component for Kepler. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| kepler.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kepler.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| kepler.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from Kepler to the minimal set required for Kubernetes Monitoring. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | +| kepler.scrapeInterval | string | `60s` | How frequently to scrape metrics from Kepler. Overrides global.scrapeInterval. | + +### kube-state-metrics + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kube-state-metrics.deploy | bool | `true` | Deploy kube-state-metrics. Set to false if your cluster already has kube-state-metrics deployed. | +| kube-state-metrics.enabled | bool | `true` | Scrape metrics from kube-state-metrics. | +| kube-state-metrics.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for kube-state-metrics metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| kube-state-metrics.labelMatchers | object | `{"app.kubernetes.io/name":"kube-state-metrics"}` | Labels used to select the kube-state-metrics service. 
| +| kube-state-metrics.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| kube-state-metrics.metricLabelsAllowlist | list | `["nodes=[*]"]` | `kube__labels` metrics to generate. | +| kube-state-metrics.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kube-state-metrics.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| kube-state-metrics.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from Kube State Metrics to a useful, minimal set. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | +| kube-state-metrics.scrapeInterval | string | `60s` | How frequently to scrape kube-state-metrics metrics. | + +### Kube Controller Manager + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kubeControllerManager.enabled | string | false | Scrape metrics from the Kube Controller Manager | +| kubeControllerManager.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Controller Manager. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| kubeControllerManager.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Controller Manager. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| kubeControllerManager.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | +| kubeControllerManager.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kubeControllerManager.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | +| kubeControllerManager.port | int | `10257` | Port number used by the Kube Controller Manager, set by `--secure-port.` | +| kubeControllerManager.scrapeInterval | string | 60s | How frequently to scrape metrics from the Kube Controller Manager Overrides metrics.scrapeInterval | + +### Kube Proxy + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kubeProxy.enabled | string | false | Scrape metrics from the Kube Proxy | +| kubeProxy.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Proxy. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| kubeProxy.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Proxy. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| kubeProxy.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | +| kubeProxy.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kubeProxy.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | +| kubeProxy.port | int | `10249` | Port number used by the Kube Proxy, set in `--metrics-bind-address`. | +| kubeProxy.scrapeInterval | string | 60s | How frequently to scrape metrics from the Kube Proxy Overrides metrics.scrapeInterval | + +### Kube Scheduler + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kubeScheduler.enabled | string | false | Scrape metrics from the Kube Scheduler | +| kubeScheduler.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for the Kube Scheduler. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| kubeScheduler.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for the Kube Scheduler. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| kubeScheduler.maxCacheSize | string | `nil` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides metrics.maxCacheSize | +| kubeScheduler.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kubeScheduler.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. An empty list means keep all. | +| kubeScheduler.port | int | `10259` | Port number used by the Kube Scheduler, set by `--secure-port`. | +| kubeScheduler.scrapeInterval | string | 60s | How frequently to scrape metrics from the Kube Scheduler Overrides metrics.scrapeInterval | + +### Kubelet + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| kubelet.enabled | bool | `true` | Scrape metrics from kubelet. | +| kubelet.extraDiscoveryRules | string | `""` | Rule blocks to be added to the discovery.relabel component for Kubelet entities. These relabeling rules are applied pre-scrape against the targets from service discovery. Before the scrape, any remaining target labels that start with `__` (i.e. `__meta_kubernetes*`) are dropped. ([docs](https://grafana.com/docs/alloy/latest/reference/components/discovery.relabel/#rule-block)) | +| kubelet.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Kubelet metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| kubelet.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| kubelet.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| kubelet.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| kubelet.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from the Kubelet to the minimal set required for Kubernetes Monitoring. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | +| kubelet.scrapeInterval | string | `60s` | How frequently to scrape Kubelet metrics. | + +### Node Exporter - Deployment settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| node-exporter.deploy | bool | `true` | Deploy Node Exporter. Set to false if your cluster already has Node Exporter deployed. | + +### Node Exporter + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| node-exporter.enabled | bool | `true` | Scrape metrics from Node Exporter. | +| node-exporter.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Node Exporter metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| node-exporter.labelMatchers | object | `{"app.kubernetes.io/name":"node-exporter"}` | Labels used to select the Node Exporter pods. 
| +| node-exporter.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| node-exporter.metricsTuning.dropMetricsForFilesystem | list | `["tempfs"]` | Drop metrics for the given filesystem types | +| node-exporter.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| node-exporter.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| node-exporter.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from Node Exporter to the minimal set required for Kubernetes Monitoring. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | +| node-exporter.metricsTuning.useIntegrationAllowList | bool | `false` | Filter the list of metrics from Node Exporter to the minimal set required for Kubernetes Monitoring as well as the Node Exporter integration. | +| node-exporter.scrapeInterval | string | `60s` | How frequently to scrape Node Exporter metrics. | + +### Windows Exporter - Deployment settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| windows-exporter.deploy | bool | `true` | Deploy Windows Exporter. Set to false if your cluster already has Windows Exporter deployed. | + +### Windows Exporter + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| windows-exporter.enabled | bool | `true` | Scrape node metrics | +| windows-exporter.extraMetricProcessingRules | string | `""` | Rule blocks to be added to the prometheus.relabel component for Windows Exporter metrics. These relabeling rules are applied post-scrape against the metrics returned from the scraped target, no `__meta*` labels are present. 
([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#rule-block)) | +| windows-exporter.labelMatchers | object | `{"app.kubernetes.io/name":"windows-exporter"}` | Labels used to select the Windows Exporter pods. | +| windows-exporter.maxCacheSize | string | `100000` | Sets the max_cache_size for cadvisor prometheus.relabel component. This should be at least 2x-5x your largest scrape target or samples appended rate. ([docs](https://grafana.com/docs/alloy/latest/reference/components/prometheus.relabel/#arguments)) Overrides global.maxCacheSize | +| windows-exporter.metricsTuning.excludeMetrics | list | `[]` | Metrics to drop. Can use regular expressions. | +| windows-exporter.metricsTuning.includeMetrics | list | `[]` | Metrics to keep. Can use regular expressions. | +| windows-exporter.metricsTuning.useDefaultAllowList | bool | `true` | Filter the list of metrics from Windows Exporter to the minimal set required for Kubernetes Monitoring. See [Metrics Tuning and Allow Lists](#metrics-tuning-and-allow-lists) | +| windows-exporter.scrapeInterval | string | `60s` | How frequently to scrape metrics from Windows Exporter. | diff --git a/charts/feature-cluster-metrics/README.md.gotmpl b/charts/feature-cluster-metrics/README.md.gotmpl new file mode 100644 index 000000000..987735e85 --- /dev/null +++ b/charts/feature-cluster-metrics/README.md.gotmpl @@ -0,0 +1,139 @@ + + +{{ template "chart.header" . }} +{{ template "chart.deprecationWarning" . }} + +{{ template "chart.badgesSection" . }} + +{{ template "chart.description" . }} + +{{ template "chart.homepageLine" . }} + +This chart deploys the Cluster Metrics feature of the Kubernetes Observability Helm chart. 
It includes the ability to
+collect metrics from the Kubernetes Cluster itself, from sources like the Kubelet and cAdvisor, from common supporting
+services like [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) and
+[Node Exporter](https://github.com/prometheus/node_exporter), and from systems to capture additional data like Kepler.
+
+## Metric systems
+
+The Cluster Metrics feature of the Kubernetes Observability Helm chart includes the following metric systems:
+
+* Kubelet
+* cAdvisor
+* API Server
+* Kube Controller Manager
+* Kube Proxy
+* Kube Scheduler
+* kube-state-metrics
+* Node Exporter
+* Windows Exporter
+* Kepler
+
+### Kubelet
+
+Kubelet metrics gather information about the Kubelet running on each node.
+
+The kubelet metric source uses an [allow list](#metrics-tuning--allow-lists),
+[default-allow-lists/kubelet.yaml](./default-allow-lists/kubelet.yaml).
+
+### cAdvisor
+
+[cAdvisor](https://github.com/google/cadvisor) metrics gather information about containers on each node.
+
+The cAdvisor metric source uses an [allow list](#metrics-tuning--allow-lists),
+[default-allow-lists/cadvisor.yaml](./default-allow-lists/cadvisor.yaml).
+
+### API Server
+
+API Server metrics gather information about the Kubernetes API Server.
+
+### Kube Controller Manager
+
+Kube Controller Manager metrics gather information about the Kubernetes Controller Manager.
+
+### Kube Proxy
+
+Kube Proxy metrics gather information about the Kubernetes Proxy.
+
+### Kube Scheduler
+
+Kube Scheduler metrics gather information about the Kubernetes Scheduler.
+
+### kube-state-metrics
+
+[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) metrics gather information about Kubernetes
+resources inside the cluster.
+
+The kube-state-metrics metric source uses an [allow list](#metrics-tuning--allow-lists),
+[default-allow-lists/kube-state-metrics.yaml](./default-allow-lists/kube-state-metrics.yaml). 
+ +### Node Exporter + +[Node Exporter](https://github.com/prometheus/node_exporter) metrics gather information about Linux Kubernetes Nodes. + +The Node Exporter metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/node-exporter.yaml](./default-allow-lists/node-exporter.yaml), and has an integration allow list, +[default-allow-lists/node-exporter-integration.yaml](./default-allow-lists/node-exporter-integration.yaml). + +### Windows Exporter + +[Windows Exporter](https://github.com/prometheus-community/windows_exporter) metrics gather information about Windows +Kubernetes Nodes. + +The Windows Exporter metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/windows-exporter.yaml](./default-allow-lists/windows-exporter.yaml). + +### Kepler + +[Kepler](https://sustainable-computing.io/) metrics gather information about the Kubernetes cluster. + +The Kepler metric source uses an [allow list](#metrics-tuning--allow-lists), +[default-allow-lists/kepler.yaml](./default-allow-lists/kepler.yaml). + +## Metrics Tuning & Allow Lists + +All metric sources have the ability to adjust the amount of metrics being scraped and their labels. This can be useful +to limit the number of metrics delivered to your destinations. Many of the metric sources also have an allow list, which +is a set of metric names that will be kept, while any metrics not on the list will be dropped. The allow lists are tuned +to return a useful, but minimal set of metrics for typical use cases. Some sources have an "integration allow list", +which contains even more metrics for diving into the details of the source itself. + +To control these settings, use the `metricsTuning` section in the values file. 
+ +```yaml +: + metricsTuning: + useDefaultAllowList: # Use the allow list for this metric source + useIntegrationAllowList: # Use the integration allow list for this metric source + includeMetrics: [] # Metrics to be kept + excludeMetrics: [] # Metrics to be dropped +``` + +The behavior of the combination of these settings is shown in this table: + +| Allow List | includeMetrics | excludeMetrics | Result | +|------------|------------------|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| true | `[]` | `[]` | Use the allow list metric list | +| false | `[]` | `[]` | No filter, keep all metrics | +| true | `[my_metric]` | `[]` | Use the allow list metric list with an additional metric | +| false | `[my_metric_.*]` | `[]` | *Only* keep metrics that start with `my_metric_` | +| true | `[]` | `[my_metric_.*]` | Use the allow list metric filter, but exclude anything that starts with `my_metric_` | +| false | `[]` | `[my_metric_.*]` | Keep all metrics except anything that starts with `my_metric_` | +| true | `[my_metric_.*]` | `[other_metric_.*]` | Use the allow list metric filter, and keep anything that starts with `my_metric_`, but remove anything that starts with `other_metric_` | +| false | `[my_metric_.*]` | `[my_metric_not_needed]` | *Only* keep metrics that start with `my_metric_`, but remove any that are named `my_metric_not_needed` | + +In addition to all fo this, you can also use the `extraMetricProcessingRules` section to add arbitrary relabeling rules that can be used to take any +action on the metric list, including filtering based on label or other actions. + +{{ template "chart.maintainersSection" . }} + +{{ template "chart.sourcesSection" . }} + + +{{ template "chart.requirementsSection" . }} + + +{{ template "chart.valuesSection" . 
}} diff --git a/charts/feature-cluster-metrics/charts/kepler-0.5.9.tgz b/charts/feature-cluster-metrics/charts/kepler-0.5.9.tgz new file mode 100644 index 0000000000000000000000000000000000000000..80b2f9012d3294a69699a7f5f426e4720893b911 GIT binary patch literal 5066 zcmV;*6E*A~iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI|QbK5wQpTGGj`jqT#l8Y$G4`rgNsVhH}j4O$)wVdp2%}z-W z*^)4(2o3-$(ZspWeg%LJ@Fm+B$8)JNQx#JL@aqQ9-Dor(mly@eJ4Z{xrMn@k;D@_^ zy4`NKzqhCVcDvp3-`?JC?}y%Ae}8X(cW1ZP{h{02+1ua$0lMFSm&y|hN%#-lKTWG# zx$op5VaOzrl!ap801z(eoOp5>&s@qn0rfBxIDmtQcuQ>WxZS!;;iVYBl>-3KhLEgq z03wbe#-;uS5c6OFODQ8U=yVoTm-N`GBUJ0tkjw&Xdu$cOl7@>mWt~JUsSq)WPKX~1 z;6C$29da$4VzJ7!po{PgZU)er({O=2;xv>4c-cFAwcG3Ozj$@nKRn!jx!XHBe0B8d z<<7zW-rnJldoTC;!`=Q)@86*!ta(U@U*cv)1S;?uC;rOw*v7|oCt5_4OWX*T2oL@p zwj2`C56A^&;Q-b<4vC`dC&vJZz;@St;r4otkHX_Lk~-}Ge#T%0p3+p9GY+PW;K`eK zhCD=xqOBP4VK|>tk76ic#6BX2x46athM!c2Ab;vCnQFvq8*M39L5GGuesa~TLp{Yt zT8ylTBwVW+;&FMIp_7yaJL{}w;(bi4n#{%Oy-#LY*>eKBy_-~}x9 z+aZy3jZPbCnQ+=k9i2AdESfu}_mgje#oR*;c6-Tkbe*wA*dx5wzQIj!{576d;$V%`C9F zem4e(hWbbDC%~Krs5SCzjhs^-2heKOU@b62P9*vo)tSdZP$}im60oIG?FBX=RoIxt4MBuO87{F z02V0KyCRd<|L+2FJMDlfsu@r4MvBFV1=QO(pcx+N9|JfDK9Y@aOuLNH<0wt*DhsYM z(r{raCJf1l(>0N3)z37JB4Nsl0sQi7f_UKjR9P<)z%juU3&$wI8q*zne233ps>LGa z7YanoKe$PmfXpz^+286nE9Z#QH4RWziG)57&qE~y=`uc4d~bVP}S>cbStAAsaB zCIn8Ik03BWPc?;t5RwxbN+)DKj&%a#3g9vum6c#JF@S)E@uvjmf(1$`6oz6(@gapH zv#;72YXi>b=*a<`vaz>BKMqj49GA**Q3TtZ1?Uz@CADow0SQyK59V_kQn>*h2Phnp z{IvS)X!O4GZuky3CgZA;y;-@2pCl)M;hJ(5u9SzdCY&mD6Ar4hVYvQ4xaR)NaP)R~ zK6!igdh&MoVfc0ct)5Yscf*VGlcUM#;Ns^2v^p!4oO;5f9iE&XPY#Zc&xhmj09svF z{~NsQb$3*T;pxHQ+u`K(==}g%s$Z;+|2Vi%?i>k({dIUadNnzEeSY?SbaHGL`k&F+ zcsTj_pOd4r_oo-b^NC?=T+MTG{*UW384^}{nUT@?+0k%3uFG*WdOtZhKl*uc_F;H_ zesVlaY)cvQ@6Z0O8|i#_{OV-$XRTZO#iHz%XX zc>H$qVR(M>>R(v}9FK(WSR87pj|Crd&q%x~+=&J62=2j(Y#?M3$ePeV_4jk22(RA^ z9Y-KvQ%@W3vata$Zq`u{3yJ(>q!f#B?Pqs)vZ4o`moo4O#u0jSPEo+eCCVi#O-=GC zeO4JkELj}*FhlS-s-eY!bxg& 
z-8w=WUq9gmsL+Ccyx9yK0^!JWe9Kpetmt9KgQ%7UV`7fJjNak+%vtncpUlTNF7VR;p19ARW?yu=vK9k)+)KV1+mZ|&$v1^MnXFKf zNp8|65%VXCnTd8EOg>5Y0;MiJ02&HOLJu8>qd%u&+1N;mS)HTO0G0k%-p!Gj&)|TN|R|cVSKV=LS|K~kl6Ve5<5*K zs{UW%|EHoSI+G;^5pv?>tKEVG3qxPMI-0p7V@yd#LD|a<{2r4)jT63d%r)Dt^G`I`I=@sDR22lw&?B)x z5Nw5zhf5MioMPY$ARHl}E2?a>9w(@PC$o)ih~v|-%Bu{vh|qiLIw$iqR%1XCUs5Eq zC1dWB3B?T6K;Z)sKs56Sw@vMujt6<$(s|aD<>^z}WR2-JY3@K|IFB{!gP(5RpE-|N zBN!0(+_aD>oAO!scuVGVkZumGzlrwWM5}~lh~8Yf>R$^oXLY91k0e5*I#PFNA{{1| z9#5J(RcFe3v|JOrkRM8g)bAjhPOCH$MWty>7LNP;xu(b!{2nujrTSJ; zRwN_~^xLzI0ZJuioH%oJ?Ul~ru4}P!CEK+e7{8;Yjf{%B^?ZA56|qn|ADF~#8>O^t zw(A|$E!D7{@Y5#4Q_SWjE^f*AKPgu-w`K0rCaBOd54N_apsnPkbt8a^M*r0#?Emkl z;{W+Nj3n?5R|Bu{|6c6%yJi1xuiJa@|L)^yx7&pUDH27Zv+g;UH1r2~Own0clf#>s zw8E^^w!Ek5XOk5IFqBaxM_+=_Ajyu zngA1{q1-%GL|73$fEAJ6vN&h0#j`5Q`t3OlupJS({HcS|>m+IEU)K}s5}`te0${ix z>#tHRMgPlX?X`f~!s+UOC5?$%yGlt4B5b;blW>9X_!2kI;jxBMbKSJCy@XMgiEPkQ zh3)xA4Pz>o0k6$hBO{94LL4-+10$A9!5V64;i`_mMRrr8rIbI;{@qti58s zUS_jvu~H?B%5(xc>s0fk2$B>F-Qc5ft!*l`2HLE}Mf*%Xs7QC56 zvQV^L%5Q6j%Q5?U(w0#!=7uugJa%a9}UTqma4TUsvRuOc2-$I_Pyqh5}d`o|}I!Q~nBIJ{p zbT`7C(y{hZ4EWU!;Up-&H>!%X`;Vi%z4SgCe?kWOw-Tv?0ZYBQn;vxQXKTkRS z!)L^ENi3I)(?7JPxR*bgfP5MhN_KOcvjA@!7HWH@tP(Ou??p2EoJd6VW-c*+Us|nS zwc1q%+qSk!BAqe7qjY~)`lXW=DHVt~`?M*2Q*$MeeT$fHeyowtEP@M^+51N#y=C?* zph7y%pKHBCqh^1m5!;EXI_gIt$Y7=uXA${5Mn_0a!^LsZ zSXOXxU_#~AhBP~Bo`COs?fPep{+m$D9UcX_R{#6e{a^iu^B?!}Y|&sX2j9iQUn;ZT zU;)tRXicb6C74oH?6zGEs@^cxX?C=l=&fCvGs!#_4B+BuR9TO_?K0h7)gCnIf3kVx zUdI3Z{qp>;zxQzd>wcaa?eb}FALR2XUh&{a=~$2KS-= zJH5T~`Tzc2{~`W!FHgPxC#Oy9X+T;369J*&!u1$OCiJoEvQa9X5balH0;menH z#Muhv5@TWSdMFu)dd9O%_7;b`wSCo~Z1(T*p*;NXgnqlO0=9{d${Ne+jy9-YG9uItFzOkz^St2`y$2 zW-a;_^|Ve-<=~a$R9~^`k%{=Yx<^e*cpk>wGLU|o4Q}7T!1wUzCmH|UB?c?H2pPw_ zr*WPC-`_9K|9ATR{=@ixAI~Eg5h;!gU_O#gdBj zZXW?+i9rC1fX$5MpyA@Vwr!HGQCm>!I|+T~5rnuf$KIbr9Ov{C`o?h0qE*y73Wk>&S*mvC7AH1XgPd_Xd9ra)O zQ>?>I4m2a)WgO`fpTc?OijR@=%$<=-=b0;4>YZ`AaGw3I^9Vi=PFXDA!-sJ{^Is4?4P|qs+&L)YWnxDWk;63MF#k 
zI9H(0#Wg)waGYP0)tF0X)E-*))4787mIt;cH)hnpQ~QYIw898WQkrht$udq`t-Q1b zE+6om7Jzy4%{xU7s=(z<;}ziTSMub*H@%Ff&@2`Uk}S$CrjcoSX-_(%(?gbNFk&AK zY^qznM}1lY9OZZTG|A7@+~zYa0b6_ji@;xZ-%kzQUw)k$dc}hv(^9_XYkkUqpTCq^ zsCvF93&5AyAbbY6>CT@OQ7%G4EZt96qeO6(?Y1*l z0c|5@nnhXT^XyWfyE4Y#$QIX{;%p<4%klRZg%qwA#fcJw7sZf#-^%Z_*X^zf;#Y&a z!j-X{S72VRgT8|4PPhAxPHS=XeFPS_DZl#r7H+TOH^I%gwfMaTy27s#kD+xR)Q9Xo zsOv?0s<`9x7sYnPZ_!5Odx1llbsv{b8S#-S>3evMT{ zNw1QtYvA?heNz{htDC0Hz=cb%rX^sN2x;Ptfg7*FnqFytP9U6|{{xO#97^P=Gk0U) zH}4OJZ-*BdXu`1pPA5eIu72|aQrM4H>H&6`u+FmKG&@1Pjg0W$3|bDXWe>G z6llu{$(QY`zJd*)7xtT)V47WJR>D;in(erHy}uFnWv{z4Ep{EGORC>>7Z#V2Gh38i z?pAkU1r_PNZmMPU1zyuy%z|`m7mF0TUM%dJBrG{I6SGvKw=}C(>SE`mD__?RgJ0Qu z*A9bU*?Wa{DXjx;?Y(3{Q1xEAY?$|6%5fPuTf~^Az|>pjOB-gcsqbT@VYS7rrYHla zi%F@Qr#aONK-oG|Ls#y0#dHgBVHIgw08Wk#Hp$4o*vhe4Da;+cWaY3d$Z0cMz@NWB z&av^=%?{1OcSm|sbuxN)(kzHyC4i0M_f=w-I;i;}CQA#`{ied2P1m<@l5<*wD+q5< zc1;#9!)}n>0^Bu5r^Iccn~Gk(fwT5?POZv%9DNKWB|+^R?FyzvVM^f&QdAJ42F9w~ g{na7Khv(sWcpjdI=N_K_3jhHB{}40PG5}}*0P?WtB>(^b literal 0 HcmV?d00001 diff --git a/charts/feature-cluster-metrics/charts/kube-state-metrics-5.25.1.tgz b/charts/feature-cluster-metrics/charts/kube-state-metrics-5.25.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..6d362292dab37df5491ac5c5eaa3453340e61a4e GIT binary patch literal 14234 zcmV;LH)Y5liwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBxbKAC-`2Cxo0xRWi>~2U&w)41}&CYd`w9VWk_1J0e>~t~> zL_!j3l3)SQj_T(3*?)tB2f>G)cAPY8b|$q*;NSo_I4>L=oR>(eQ6*cth>4 zd`clP|J(4|Z8bah7kO}=Gl?Z-xfllsVJ_(u$8ugyBFY9SjY%%Z7#$Zlo|Et(8g>=R z&&3%186bpEh;p2fF#;k&XRPEgfe#3kJRPICl!X`%1~UrPW0nmH&N3qBq!eMyvaHOh zTny$U&B9m#5dkjf&x8xg@-ezN2yjujp9j(CFd7C45izHQgm*7 z`Ny!TmEpW3=|6c%W)Ue}$xOYdN+tM$CjT6MNa83Xu28~c5(oP4b`JvqvkE9~KRR+z zkTJqVk$C@y(IomJsneYfzy(R8x#Pm`Tx=3Xmr%z{|`qG5AXQ@ zZ9IMSoJ?_+F zLb1O^7eDD>69Gbf^m2;ITo8#$LC^*IFBE}L5m6-o0)!-tSb?vCxa0K9v)0ABzLZK?o5#}5(5SyYa zjZfh!d_);81ll`)B0?b-DcOI3SiO#rO1ucPmoZKhh8fEd$WdKEH|h5Vq1Jeq_jW)k 
z1V&509KaXGw*ub9a%sn0GQ=(j=QJUX2GN`esSvgRPaT=|dO-k0 zrbtXOAm;~2&Z$6`G)+-Lij*z7v=DQG?eXp@qrGNgIX2S^DOvFhIfO$nAI z-$6I&8;nR20yat;>7%!*%Ty>@A|DFEX-0C1Q`?N~o%)LtlrtnGHiSPbF~_+iL_~pt zZK0}pCOMv@BpIVAP6Y`9+s`o?seyks$N7xtVS|xYttuMElFSz9a!zvOgH#wMQ~=B< z=p#<1)c{nzRtjmtVljwW9+N_f!6oD8DZ>f8lANU};bKs=Iq2I)VJs9nnjunwdZ#Q+ zX+Ha@NU$V5HS-1G=wGfmTUaQRo|kF?B8)gm2^Qo5x?ih*AH_IFDZvTQ+a*K1%ya0Q zQd#rZuwRVCoN=j|qAXEMThfduo{|!D|5Pmp2TlL2s$@#%o#6t}90*ZiR6@IFrhj8( z>#7NOQ)Uyw6?B#+XikOV&k8Y4ln$!?!m!HJkWFxW9#pcz8vqa+YOKsM8Vv(ZE~w(p zKWLb*Xh!8&eF*yK6%|soiIapX?GL9YPAS5CR;tN>RUET?N@t4nIwV6MhKPK?S&@=4 z3PVk2;p`7ugd~rdB9;dz3{y4>r!*xG5RtJO3^hKA+zKWHourDKX95OsLeP|zd4d#` z@1XKeQZ^Z6SlJMU_);3WE=5@qkPnjMV?NVdO!FOB7Yd^s&?*Jtnsu;3B39~QQVW)q zxl~kgeDYGwcS3SWacY(vd_5!lg2v=Hj@7TX?40CAJ_ka7QY*4xT#nI`;gccoP&=7w z#I*tR@&pD~P4*?`NnOe*Sqr-RGh0~k)Z%%G(=&aPmI*>qaX8hVTcyT zF%dvbYHaJ}(Uh}Hy%z+%JP{3e+Gagf+{wxuH$0{#-)KfxAA_LZN5>UGr+7kAXd5Sq zSx1NyV>ShGDJ08YK_RHFP{&2lX=^FaKU%3KNJ?VKxM?A5bbP7tCC~yp{ z7pH12i9~yJ+Qh`MW+mkTajKLIrHBZ-l%X8s2>ve_Qe8%p?FUGe^a}YCaGWGcLSA7O=n;yslUCW;v+Oeo$COh_r9j^0$!ir|TGB`ye`z}J)t zshHR$mhl`u1A{*;Q{p)Mv$_;hB(Ijyhed?`t$4oSl$P=ZVtK2m>YsK9f!?9rRSEjx z?b{PX@}yuimuS!FrE1nlO4RR#A`;WYeq=NoPJS1Fx605{6Sjk{VH$_FTbhPUMHtg!PIxFvDv9V&Gk}+0L(%9SiGC^Yj94aB-4ELajcsvcO7@QQp;$~cjsoRhHmC7d!I;;O0=3ij`_I%1aU zaH14~Q^w?XG$mpoB*_d1Qw2a#IN|hyresE5h#03>dyp&LCB}ueb(HpE{MZR+h5b83 z$FE)mL5+EUL|1h1Z^hP1F#b4E_BhEClrv!d!c3YqHe7wZikUJKE{NvZU~k4O7cpg} zFu>zEQ>#=>vDF?UZOVZ(pt)izf(T_DgJof?WM1ZoDeiMYIm`7r07jcqDN|ygJ9Hi( zQO0wH!fTpSHNTM(VGCBGgrQ5E*LvesD|6jr3Fu9k8f;~6YWOM5RUw@GRthMQsU~uw zls3#4547|p8N1LXVqEfcq1XhB&{PtR?pH&8KXPC*D$PXo5sHga*&V~ozQ{-h{N~Zo zYwCKcSzNPHtIpxUYid^91xw3}yjE(0Fq}^yr>1ND&J+T{*Tv9<2KAF5(=vr=eEaGQ z#e_>br7;KuH3sKop-TEK8l_arH)!<)%lQ}$h>Qm^wFF|F`3+VQ`X%fdX&4K}j?gkx zvlmK)vdOih3f1imWv&`Au4P&B0EU%v0$sODuBOC+H*HR|N@8@#23W1oMOJN6D@Pb_ zi_sRTFxn$_s*T;>Njy)=v`ka2B9g2~mHlf52SA@HV}s-|s6I*vGmgN8ragclCai`6 zZV4rIz-m__PWvj?c_UVHLGU*k*CVGFO8>VqbFjh_)*LU-EYXAZlOQO@uz&aOGxXwL zXXxeeYtUe|Y_OuZG$29o 
zjpi~-JIp3Cuh+MO5!%EOu2ecT??x9~YPGhJMV^$bS#PM)#+_#3-kcd5&8^zs*u)q} zrl`bxMx=#kN);@5+x1?_dr(NgtD?k*;-pGD48fQqng=QtpF0PS{*oyDu26F?=U7%= z$~Y#4B#GgneXV3wZ77S|Hf>Ok z0~a`@U}~H=dO&6iQlNwA;Bhnr7L$`p!fWV3OB-RAl6)rT9(J1h+!Bg!a7JDM2D>4v zU3#~LEx5rO=mH6LIyX!|I=u4M9)co@e_0)k=92IWTrD_cNb@bt2rH%c9wg*X+%MYi zD{~Fxo@9nmfE9G$vrxlLOdVb72PumKpQRu)XF20`Mf)FgCr$~Bh|=BNf>%Pi>#d0A zBqMc1m71$PxL8SJ%mL|hl5140v&Sq*Q%{!zPO0kGA!>=EU}jb%Zw5R9;jSep_%K8T zRLfJES2kzm&A`HUL|HDaZ4_|nfk8D{s8SU~kMe#jl=yd3;q^}U#qn&d18E&e;~AQb zBKz%}EXGLv>wl1`(3X9T9tBCKuMMs>LNwHQM~io~_{nL}^99f;!svu0$ePATyGfR8 z@1snaVU_+KY+%^C4YmRx50pyljCS)b==XzyB^C+es_AkUb*Woke+riPMeOIBz_E{u z0`ttw(oF#sB#v2D+$_w6h-K;vP{nwloyZoBa9Yf9RBr-29V9v7Y)Vt2)B`a__rJNn z4GiO`@L-J5{e;EmgxhZxo*mE%;DpGH;BlqlXQToc_)7QB+M+fNR^P*#>-_~(tRnKj zlv+WBeN-zGO;dcqI0{4bo&L)hs*U!1L-iTB`Lt#EjThVpNSS&xpPAs8!Nq9s#&6Qp zi)l*B3>LYnr+-5EgTXWS(X6FW9sA*g+QU$d5;#&c;6?{vuy8@aC(NDd1<2?c);*$1 z9vAcs3Od`yaz7Lz$zJ5L<}cI19rY%(gOC=5(Q{%qUy0s17RY>1d%~IVqhJhf+b3II&N2d z<-!Y=)bX5CKw&U)N+0Bu!LmxR5O2(uIRu?i*pv?XsAv|kadMk61zzT%7uxMJiDWWR zxIL$Wm%xp_Et8p1fnm2a%Wx=2fwl8CHG6knB^J$3>O`-VvkkhslDs^c@;KzClyz%_<>Pik z#8rX3E``;2u_QS2iL3qf=4j0ul^Z5FPv`;Fy{c24>Dm(!Rwx*v^ImAmJHEg)1wUEL zQmuL|KjYB%27+o0#f3*IrC+^hr+Zz7bVplVI2m|<;Wm(Fr9a`eWt{tSXL z!-e_DS^gWF=pP9t8OsGRMJo+M|F-T%{cFxR{X@}vTCfD-r(>Mz-R97mRz&}D4M5PI z`IBZ;a)^C|-&rXkT<(%g=8T=!6g*v`qDB2v8KQu~f2=C6WqL4_o_)EnAyifPvU z$-HGr;xvW{XbkRy{=q?#)kY~Na$G}8ltsa~)UJnGNFPMsJ+nlo39&qSX$z2eJvx#o~5L=RJLzXdf|-7C1|twDSpQUj@d8=CM%j6MWdtWABx~s6w-^(uslK1%A&1_ zx_U5Cp4h-7WVS?u&QqpUsB#^c(L{|iGa1zgX|8OFRr97+0h|0rVi{FY(^W(t$qKnZdoDby z;wV*vzyMuh1uYf*cJ|E)QLDLVA4j@?Xgd#3WEPNnAsV*k*vRqFD-(J~Mm_GkVtT;J z6<*7x0*=yYbuqHEs^i0p5-3_PToZH*N@a5Dofml53YDbsyt1DR)q8Q04$2sv8ioPE zG>p67$9Vz)F(#O=s-h2$j@tzTd#+1y4lsc62V^$?ywR|;Ua%xUs?4!2;v@_3tOLv9 zEYBJZhla)ZEzo>@3pC%R1$~|dPA*nkb@k!RXihVndQo833@{^~6YUk95QI~Dq1VCB zC&x&i#Dw?>ETtlDAO&(kV{?QA5Tg01cU-mEybXk{s4y`IYn29Epf*}m@557}ObV`a z7)4GiAlOY*4^E;FmcmIkI)!jqfEu9_d&Iy{n5xzLaXN()b~%+4r|M68?z5-mUr+Iq 
zo(-DT?Y?G;;j0gdbJZf86;N=%n4@I10B0SHA+*S{8jaFm&j_)C2u(=JE`wGOb02j? zFl%8Dz;>Ql^YA9r)hq1QAl5)>1x6k+&XgHz1RM$(>{b~RR^j$SnGPt!Ii14E8HWVu zS?0LDJ`eiZ7V@}UE!T}b8h=jnWNbEaU*p2=e-eokEV14EOdDIF=A3?Yat|6cpZ$vF zSV;))P!C9{XLs05w7Op80@3_8qu~3eYFx3X+QjSN-8>0tPNkn5V2sY`C7p4a3?Q`u zO-PLSSjQIw4vLI7#Dms~MnU~|CqAaZ?NB=Nx9VV7m&F65_)JcOFe)ZaDEulon-FpR zjn(SCZ76h&ga(OFHwJMxfT(`0aR4EwcY?uasO?7d0|Vkj741}wrkJKBCvWGRh&fA> zF**!%q~-U-oVigyejwPB+*NRx`W7IVBZ|}KB*lv}60YXSd&dO_2 zEeWY(h;h&b0pf!aA3E)2HVl5$=RY)A-Ge} zGa>;6HR>c!(8rH=+Q47pSt5dui2_qDv>O-8#8wZ5 zRYCA1>$Hv$1SDE^txCGxvOkgJWgeqPhwvxO&dTYOen7pj0;ZUb`dg#=%x+EFHWUwA zpx;ZJ(kUeg!bJg{h=P9;4IRo$)rRVjKob(r+b7M6-R^8?qsoRG>Y5 z9tb^ubEe9|c6veL{V1rmbTtqcmINbc4zM>uo?vfIA#bqVWRxp@Z3oAbgZ85vTw7*o zfsIBVG8vg4_n;@^r<+f(ho>794JA6&Bc>}mTx)N)MzG*Cms8aHy9oa-dJSm0-EBy2 z+21u%Y_6Jyls896Hk#jhl7=_SX1ZQ1GSM2Upc2_vS8jUtY{c6d;lIBOH1w-E(2YiV z=8xV+K$ecC&imONccGQdYTFRJb6J^68din8=qa6UDw{wYh)FfRDHk)!x9(_T;9DE` z-gty-OLYulgWz>XywdAv&*LwOf!j@HZ1R8^hNVk^6L~W}0~LBpSCzl$_20&^jsAq) z8r6y3fAvuB*WL~s{(HOIIKhI(HX+_7XzQXqMQd}CW|5c=peN(CMw;oc(t?BbIin2c zct(_K4RjGa6N;shzH4 zmu9&ecbB(a^|n>zPxO1qWNk^ed6b_DQGZ6VNYS2T|BJJu+YZqItr!}zMuJ^dRl;I4Wv!S;%uC7+jxw2sR1r)+2?^ zh;*57k)K{dbeu~^t#Pb>k2CA8MIKR#vQ% zeyI0;3Xt{H8i_a@fL4Nz^`M$rG90XS`iv#O>iqLN#xtyBU^6c{dYKm`cwY7r8cWpE z6zSx)$&v0=>7nRKM`_Wx1@GhjM(d^x)6P2tBcr{TDHYwVk0n{vQR~e<@n7msoan)_c}tf> z0c|{bok}2liNqXta3X&UZCZ5H66S-yi}ByZKmT4)KzGG@i&b(*wCD6p3-rR%4=RbV z-vA-;toh38hn;}6$Xa|p6Pn)idl>IH3!h~QHW=_*9_^*ls3pReE;8o|?p|M&r+E3& z>L_=s3$3gDYmIOhz#4V0>G8%(vK#X+zI&fzDc{~GY_-h4uo>KhA$)E}>lga0^#AMB zAO4Z9#V*%Et=<10KCb)!4@Qq4-QE9mE01AeI8Q3iR4oOLJ>IiNeqU0;B+2P%YU%B3 z>#_=C^pn>FCX}%wg^eH1*kG*w`CCx38@CoB)V84gI)9>^<_XCqI%+BDe%z#Kc!dF- zI(NyRs1DNE0bA^)2DQwgTWeq{zDkWv5#q*Wds9s44#+9zD8~|F`jUaB(;RZMF+*@9KBKfKBYn zc_@QR2`LCb@>WUitlY1vf2#?qCiXw0Bvv$jN~UlJvWcBq(WAhf$*4Ni+J`VhR=bR+ zj^MhYb#b**zdXSuN*!%E4nl){Jz4t3O>}KQUtgW^U8J9TRs7$1R`Y)@LYJ9j_X~m7 z@c-fC$Bp~n4~9p>JN|zwPv?egEdhrYEZaDo);yNxv+`Y-Rj6BcQL0eX90$A>Yq=@O 
za*JlE9Y@`LSbO%0eO0P+gSvn11-WK5&210d0-H}+I`0ku4sKj*NWs7Z=@**GdWsqDa zI-FGX)U{QymWb>Ul8wrpC^QhT%LLk3c^Q;PC94WnE!8ft^=f4jazjs|1m^+OMJbauXVU=Q?t^wy~gTJ+j?^& zzq!r)yqmTiZYZ1cCc!$&*nrFGhvXL(le6uf{P_IWH^;AExDQBYIr)yWZ0x;6`Z$L1 z-n8E9)FPI%MzwoYnoXUZ96$R6?djZZ{@~PT48aAlbuRBOEO+TEYL0Qc#OeiXV5`>s zmmvy6MWj!!uIi-Stms>cRXWusC}+Su^%l8T{!jE9qdDq5=&i-9!D2e|5jUIHg>?R5 z5xVr)J~G{KmgCi!u1>7FlGZLk%@cyF@au$lO(fS0+A3^rXq;?T#3dNmtcq;}T*a;0 zzvpFi96v?Pmamqa-DoL9Iu7+-4p3bC9rgZ1qDZOiLeIzLsWP_|zW&QSS)|a$z5NZk zwtBQ%=|i_hW^E|Bv#K`1mQHKX8?*1Fv{p>0HlA52x6N#E9Cr|I~S>d$b{8nM@l>!mAzXc*y4&h~JR3)AcSRn`JPs9MK4V-gs zo5oriN3%Vryo5@{emXjM3^^D_OA0y3RabhNv*B&~cOcfOJpI7hl+r+M-9CF0#QGsx zuEs6JWWy2ImV>Nt1~+kWX&H6d(D&WK_4rS9Znw6Bci?$AH^7bYYC*Oscfb0$0(q~L zpOUV|#A>TAj?)VQAe6U>~SRC9^ny$j>8FjxavtQGG$Yiz9(VHd)pa4)%x7nxb^KPG26~D7d6Uh2|!6f>SyFqyEX!(-M0Wc}eZAu25MNgs<+eE?tMwnFntfJ#-YV zLu(|@TL!0AtPY`_YHt~kX3097W^%q|Q0nE@EWlPiMfx)}d3cZ_L zQr+F%Wlhx~U1L$z0qwe~>QHuGR&_AT*Ht^Cr+rqgqI$KjXc;v+_QkCuh2P>P7t)dr z@3N8_psul$8jyBfOARnPFQx{ZRjbKnwCgYws(kd$N*E1MM;3}D_s-U|$>x=`?dFz( z>1e$}wO#KfL9MNsM)JbYWcSt2>P>}46Kl5^mX+Um!vRK=ggS3#h?9g9A)YQxT)EVe zQLxHFNKSC}R1Ls*FmSwU++L-Fx*H9~&1G`bu9PQFo;<0$H=2%&=D_O$1Fu}&q0#M5 zSU!<0>$`1#udUMBEUiyr)6lM0?ARvN)LSpMg+<+UdUGrLx^*Z!T45pIjE%g+O6~w& zBbi3U&=@$q4U~XR(QRdkPA}?zDY_fiva4-ZElvAvpLSAjf3}yhkBZZ7%A1$fU(PF6 zB5*@2d_$n?7h* z|5#D?Hn{#j?zsM!#r4;}sIAwyBXn)6Ahl{&thRAcONnJZEK8Z?z&5{Z9(LN!-nB*U zuqAl!cW&*h3p>TrsiJm|+1NLoA=tL`E=}buA?*|E{@bo5YqfT1Rl^Bva3FBS)o|iGnNa^{j>u-LByZCT;oF0s8mtb5SfC~dko zY_DYYdAokMOE7xaYGd7WuMP{{LZ$I;Q}g?>)7da+XRB_kKFikZ%tm|QH`$@J4X*2- zz2MoJP|H)_N`9FOvpu5X(*F%8o&i{@^t@D46ACB(M|J}xOb!E0^YN6!r zW=h2p!Z2(lW;WrDcL`wr>d(scUuh1Z)<1UR0PEKO!^5Ly{+EM?!@Ko=8_)WP#%7uq zcko{uYS%yYU(-)N`?+3yO!dbC|6Y>DDf}_I-|%*sq0);3ef)U;FPFt+<@&#N=12Xk z%m4Uj*v$VleE8_@{Qs>yS2vgcvARt_=YZ5)qMqc@XCwv+&1YVosJkqXJ7$4wdZ+4> z2nh>yTm3$IPVxo9X=?I0qRGPg#5;04u1vsa0IVKbFJIzHEKNCUm#%HxmhW!ei=giJ zk-j*{)?m5Y1b|(I`UuprIIhp(=9nY!YWuZjow`DRT(%gLj=QpEgxWE&dLc-kW_~fM#<|6t5 
z3)It;Ke!kTPTFvKEla``uiqTkS!6;a9{Ec~YZo2ud==S+HqQF9;v%TcuL|qPztW-t zhu;CZOV;@HJgfOXOYHXfb!@h8dea8nl>Y;_Rzjc- zsC_p;gTFhbUaxl(Ed3_9a$8t725Q?JaH}D-8*k)%KNDVdREtdEQajZNtg-PgXU z`y1e%BviGF)1OyuEN)V9DdLS}S{7&Z^5x01`iqm7&#M=_%#X!Sf*3)_%an}K{nw?C zr|QcamY*^v?}NmAMf37QtsIm&&KK~#EDB}Uz__QHTN z)iJIix~EFNg&Sqs-@BK0RS2-c3Q1m@@<$d2=4N~(0U^+#DWEX@#V_AVYLB0P$ zqiFOn8ivDO7bb8?slC!bjL|Rm^(@_gS4-F1C__F-lEci}a#iY88{Z3{>IMQOsysp$ zgipj6a^yN9bUuN*R*Ui`RY{UO|y#r>g|L>>uPe{ak+TCc8jfqTokFLk+rYh7;SVwXFv{RKWN`M(!)eVq$n*75(tQ9J(Q z;Ew;_%ClkU|EIqBr5@e?*|%SAHTI+)WL6z59iu=03qaQ-iFFHEZTeVBPA8=#-g&KS zji23Ds!h0kZCf4NME9aPXje|EuUorkcRxsRCtt4ftmgmMaQb-9YW_buI6QjX-2ZCD zSLgClQ0L0viwT!>N@K+>=q!iuy5CD8r0PU<$k}E7Z#kN35X>y~-qqYpVA?EisPZ=b z(o@vadw_bESjO|-J9|8(3SiMlTZMLGBs(#3PiW(SEAbtxb_gsEFAG;focE zst$R3%z(eLT;jA~3FNbnaZ32cINB;SpLLitQq48&3%#Dg^C8c12QzDQg41=XRUc)w#OkWmC2URF3Duh*tzENrAeN! zF<~be6^eW~nNhd}Z`FXo=KZQ>x9>%zU|nY*D=0f);2j;Jo1?MOQft880Xddjb7T~N z?b|JshAJe8lEg)zP9OqxW0dqRME5%Dg1ZxT*Bc;~ z?9i&ciSx#FHV^R5SgG-hSuVLkXLA)`1v(9gyI`il{$4VPuZfWlX?skBO?I?93I@b3 zZEHW>^%{4+zHM7Kw*zlOw9ClUhqVhPjN_SNklQGNrq)W?U5C6qW*0c6$>&fy)-=3B zPk`NHDRDgC%8c7&m|MU%#!sioyth8Na@#(clUy2Jv=TYV&PmS61tpg?ytE6g_pS?c z(>t|h;v`m}v z$7@P?J@+U6?u^QsXRZH#*PtKoS-1at)I9%pFdW^T|G1r};s0N?0npqY=yb?8HvydQ z|6beqzu&wob1O%F<(R*zu%H`qyLY&Yxw`+(W6mg|M&6X=UCr;QD4xe9nNSdnC+vFR2BkZUl zO!&(a!>elVx|dz6UT;OJ`>sl7U}&Gh-d4L=|Eo>5WbR&zX;QxsbR*#Ah>ot^9H-@& zFTdK{941lY^M>VKL`_G(UD;OEuA%iy_v$B>L^}h~Zs~rGzQPd0P4qvk_3%#?|9#ZX z|NH3CUHtd0Jl#ykSYFlvup$L$&Aw9$A1<+}zkkclN!}&TY%{Cs-9$Xyu6g=aoQS#R zd6?xxzHJJL71fufVQAkP(txtdB_dtdhup@+A!|_IUy`}#&Itg|Cj37$y3?oO|D&VF z2d(ozj}Pzszqj%%wS}XsZ9(`lz+!_gXK z&9e7X==;X-zIAW5QfaPjZi9nKTQ+Rn2fwiibOFZOnrFT4gmb-BI9foP_e$H3{s8)K z+PP`bpGb0=t;s{XNy`OG-nJTVyu42Pj_?}1RlAp7(Tw`Ct&LzqnyqkVxh}cYP*1Mf zdhO*HbSWA(8?+@Ybc|B#HnZiqW7niU+ypNR!cU^H1zKL-*iVoq);3!JVh=*m>ry_q*P#e8iOm7`{h#=7018}v-CinscD!j&^!E9m-Id%aOd z>wq@uZD-p%cNwnI`iZZcOjpKF$_#Hd>Z7Z^QTLjS?c6i1Nch_6D`4H>H6 z%+CFhsn8h;R%)8Akn5FuwcCVN5I5?qD}J++IU$%64j#1cXu5J%n?>C~G(K!J&rM(( 
zjlmKOIyw&;-`Xkg;At<=)@bNz`<-KP*0`Q?i7weahbuO<8kay=h0rLNo6o;2txR^{@g?e$ z?SP!k_kULJ|4HHUNQAp5gIX8=F>K|39Ub1?|9m^oRXhJ{XZTj_7{HRSkPVM(bZiFJ z2<@-8GiVYc`RC{$+OpSMbM_nF%!q7P#RVi5x_-{=&5qGJM05fTq5ps*dw2QuzUF5; z{=eH{z;*n8^tgHd_rc@g!#nPjWd2XF$8zn!0(=IpOyUof^bRW zM*g+!ZGd&>|Ax){uLlPQ5AW{(xs}I1|GnmR2QOV;#n*fLl?83d<%A{26}cD*(z^Pg z6PytfKX*a;DuM|8>)LDKOWlLVNzAg07?`n&@j{iWu}+7{Su z$YuBg++|O^iq$)RaGECt*225NADnJ$K1wyZnxtZ*9-GHe5_)kzh&um&jpeU=~+jYbCRP;NmICRw!rZ@o)HlR zee`xt1ze%axD-gtNt&Wr$|gEKo943zu(K)Y1wjRtbMHOQlc0}sG6N$R?G>C%=?9W% zw)P+U5&AJt7l`FhQ=xqp{p&djtA$Xw6arWC^kxO>0|I-)8&tJcYvgD@PShM~g9vn5-|IwZO zcS}znwJ8AoK++67n`176;AJi(PE!H*PXE$T7|%#9-|fw%EW~&)m{B<|CvX_7Iwmw| z*V~VRpx;N|6Nyeq!333zFVM?;%7Xv?k1#}%e2^*4WtdQ*bTJf${}a4_|2}59U?~Zd zkV2dk4B;g4aG3Xe0^vg8GGx5&vS}pPCyS;wx_dc4ClwC%@)&{~Lv34v>xi!O~ zdi_&QSNFTjyL+yC0-}kNj9n0cR3SyM|8gX#nl4yQaw!lNg2fa|D|}Q*AOP@#BqW6j zaM3z}WcE?5>kf~#op&WS>3%We7zFOJ?n41`gVXMVUEdd7ZhcSs=rx^j$SaGcoMlKz zMcrLEDF`dML=gNhD^ZMd*L#|WMT%oG4uVmHewh&oYtrx!norrgz5a~IuzDM+xBF1s zb+%vH;nqukA1zqP5i!~A?8Jk;hDu>C-p|RUp_B@IWA-FN7l+X>8U}&>zx5M|E>?SS z5Dgz7L5SwB@f_zflCs(M40SL`*<@hw4iBQ?;MotyZ@z!=>c{V+EYY);;osEU&2^3! 
zmP4bsaQU`FK@&ynZ|B4gcw+jSvr8yFWy%g$9W`LaLGTTF&RJ2A#HxnS7*N;60ov1l z93sY%GI%+Hh5CC*IZ5_|Z;+Q7_?#@zC03u+z$a=j-S&0xP36ApD(GETpl0Nm&%=TM z2>ux)tfhb0>4cSrz9mzo^%|~A^VyQMKy%620EppNSQq`cf)~m;(WDek0A5HPy!{Bh zWiZK#FFEF_SUxNC*-9bf26h>-t~%j-+=c%Lfe9LQZgELr!$4NR_-9 z^?K-CfKdIOc9#UcgE)G+xC;Z(zTX*Cm;U_!00030|D4YEm;hh_0APRw-T(jq literal 0 HcmV?d00001 diff --git a/charts/feature-cluster-metrics/charts/prometheus-node-exporter-4.39.0.tgz b/charts/feature-cluster-metrics/charts/prometheus-node-exporter-4.39.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..0750218b70b48044207da4c24703a4f45bb27447 GIT binary patch literal 14202 zcmV-=H-*R_iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ%cH20zC_KOU6xdSci``>Vl5gqJH`#04u`}(pbMdj=J!e*S zP6CmTgf>aA0Z_Ku$yxg~_Ur8@ISU1V1TVVy5@$MN{zxnmC=?1np{h_-$T&-goRVDl zDT|0tt}@0Y;k_4A%w;ggN&JVqKK*{bzrVW+|M&a-`u~H${_Yk_s|J z2N@2h#NQ73ZH3YcF+`s{gb?ykiW4$KFn!P&%XvuP1422EhiEEgCWgJf zhAc_)l*)N;O5(&16%gU!jQ&ixpe!At*|vwXtoS(yz7Mv&h=`EWOv0-J^dpHA1a;Av zaa6)bl_~B`nZk4_Mm<&IL$N5l3o^fCJQ73CuL1L&c03;y?-NW@iD^nWR3Ztc@eoZY zPI*3}?5`8`Lbd84L%QNyS^PJgP6Z{?meTf9S(q~>1x_Q$2@!wIn2>}85h-gBPNk%t z{Hh|^) ze|N9Hzg_45yW4|D{{Ik97afu@&SQ!6fY&%?by5Ug7oAV3KvW=%{&Dd7l|N=Y!BUb4 zjcH6&fkP6;m=iR^oZ?YT1d@;E+WF%9QKbUc2;aEpgq%n*MDN~v-C|Gdz;|B(^97XX028)&1CVhZ$Bma>7rn`o5#fl95sM=1 zEx^-aY3T{|f~8|R$vHt6`H1*@ghM~$>}npMzfVbuNUBzEw1q-W)G9@|UQo50aVAM9 zVZ~KeBnS zCoI}58c+bqmDJ!)$++yuMZr0hrW3JH^duG+qF2z_&sgM#7Ned839rbN>~#f^zMK-j z{zYw_J}#?z2o-;yRS|=U2_s=iqep{F#b_fSi+b~hMWoO?N@!>!&xGUzCs1coZD@?k zA^LvM?>E4zwV`I2`nv?7Dwuk2uvMF&ZplY5@SGFIWq^*yNb;O)6|c}Gjbk-m%wk1q z&#E+`hRy6sh64nuxu96!KN?VWo!6QNc7Toy#}Vj6qg;wC1<^)?rW3`wMuvEt;E1L~ zC`JJDvw))8`~LgyzyD&LZ8lhT`K0o#g#esbJ*{M3e?VuEJ`NkuZOcz(!I5mJ^513a8E#hdMv%LUpUG?2CjXurKWIzNSS_^~P4O)Tpzw{hE3n>}Yyhw5&%f*Jh~C* z_G&e`aj}b1QOv?oG4AfBsiv;wyrz!Q;gtXks{D&M7n1Pf6J6%`L@`%Rgl5O8Or=O@ z|2Df?x0FLIO0cZe4(+T-!*LY%QY_6`DFR)RI7S<9j+ES2+5EL+*F`@Gf;tAeS|A-X zB1%l6W0-~$MG+7$90U+cil0eY<_ zHYa1kK_y5L%SgJY6~yPYITQE|4V 
zH%FUxc!SF3k`T#hD0EA}P*bh`s2L}mNeWw@Vf3l_bBNMBj?IiNSaVw08-YK^R7e6` zNeMVKm4uJgLidpQ^fo<+B$*UFQ$hGCk(Z2L&~(y^avb~6 zfD&BvcP0F0s^v5FUL)R+xK){#IH56Bbb8l0KAG)zwvhUNxAWdDFt-A3=LDu{0l8y3 zd2}TSS9FyekH<7b8C0-6MjM9PY@t^y#POzmOa_V3Q z;>kmysDztWfWhsM)fTIV!uDs?S`%F*kVSU+*tu3N7>fw8yM!_LaxxCS;2yAO#Res^ zfW}WkYe!BW5ie^JR^>tqvxi^|D{4Y6;f&bTZULLQ&T1tja_8{ z>LHxYQOweblF8xZAfc0~HrwFwn1m80@Bey*e$6%3dau*k6E>}D<-sDkQ1sDC3&=QX zjCFfzCnQ&9BSLIvx-2XH>FOhqLGraQ!yr7ST19fHRsUighhy@h6;Kw z2o{Q2x)o;=c6Zi=%d~A@)sPB7NYSaF4=CK~Z+Oi{zmQNLEiI6S%&>eJS!lHoq?=dE zTaUYBa73n$?>HsK-1G|@=^aU}$m#?%%O%T#s!peh2MeY3GZ1$bLzOIKMY~N=)XK#j z#iA|gPq5Uc4ZSfLc^+Xq`dCTLot}ALH2Q-Tsm+NbP76K=x%2%~!P-^uyp)}YE3+r`l!wY16r5%P`+hG%yYXmvLkRBAwl zUd0S?234QtqLh%x9(Nm?3>;_`96~MY8k^B`DGZ4@S{Z3ZX zL(-d&RMjGpQ_RO1Rcmc~S1I=j;|nsktHqxUe6=L&Dv+j+L$LE#XZ3wTPpV7Ul?p0N ziF5kbRH4j*%;l7E`kMwM6lX7Fbue7I0KTLSxsWlP;W4RTsaL%F#_7*nRdl;hHhwyN zbq87OYjjt40NQ7&liQ9isZUsxrb{M(n_i1ob2!N5!|lPJ_X2cCx#Go!X^29KD2|L< zMz*2>>$Z-K4v3Vn3p;fwOO=YXQDxw4K*<>TK!B3Sq4WRz*MFPCdCSPGeyId3rJ4ur zT4xs-jni2EjleJ^1sv!{Tk~7>MZ69IH7&Qm;zyP;u3ig?lT3gGm9rRRQt+Q}+Tldu zU<|Obtdr@3WRPu*`lt3P-SD*9fU$ErjT=!RoH8bidyQuL|MOq}UCIq93RxUeJ*VMp zN3%*tL^N$#alvWl@1`%38PN?+`?rftP;aDxBisEbWwz~*>5#Jw6b>yJVoqR8c%G(e zo}-PIn1VyYSssRjL?qg@3+%2CRX?*S zHj==Y#**+;jhNGemvCBjN+Pp_ibLjdsLcc_+#>V=7tjjrTwNhJ+nVB~EWv8ukK?(L zZ$)uLR+xs2Ryih@NNE=l=8-F)!hwtkU@vBzPH0-LwO`l>W~uRoQGlsHc!p^V(`-gD zOz00EWRiU-dFh8=*ysZYV}*{fzC7aisey}|tjQ%cDaG{kgMPixV%9~kU7Oh&l&9&0 zh*F$tH@R6!=u4WfYIZBbUcZpls%EY-o7X)VtL%cg<*r_}RV_yC)|RUKz^}znbpuS- zHdEaPs;yS4ZWq1Cg=7i+O)FM;7GX)wq!NiIb415VyY{MYhsBBasoKWU$xravL*PwwWk#+71Q`(JcX=}NKJ2ltDr z^F>i%(5$f22j7c|e^hyX&6T{wr9Gdlp5r-oz_N*`#1g4+<4+EKY)P7AzZL3&Vl zRj~x!wK#&RIr^yv#^P_B<=GJJ@9pjEszsis2d(eku=JEMSr74Ev1a_hQ81bD1^g-)pjp)RagQ8iSX z1wCa#B6IlfsUOCpzHkHoYQ+0drweEZfU|~Lr*Oq&b+L(aT76@chcBwX9Z5|T} z7`dVXtk~w271b2Sg9CtCb6^kVL=hyz6JP>{rX??B9O`db1(wc8JP%54X`)jw*T}tL zDcM3lG9k}d;SQ{~jvuJjZa|&UzL}2CmOxDVae#)N7QhNkycFM1T8_Bg?f>o=rH8VB0aTe6`by5XXeq 
zjz9z+6JIi)%xEYJ_c>tZlH*lgN?*9}BI9@8tYYlB7#P~euDMobE`L}0-TQD?hTDMF%D z^sAi3UDitTEY7MBM$`F-)5uP7#bMmiYN1r8Iuk>sK;>6jI;?(FJs2XKMEkq0(lSx1 z6UqpuOzYq9Vw}fF5}wf1>~h9gqjKoD(^m*9jYDtHj)mKED*sIZJb?zzNXXJiST#km znDBDd`-s4y{t&%khA(QHT*;e`lyD(n^=dP)5(Bjpgd;Avm?uhcoRGpJ)?2z&>+qe3 zjPi;8*LOnIdlAu)iLcaWmO^yTSgT__x67|}qbJ%b+8TqVM2{kfPPl73wn>A(BOr@)M)row z=)ZLvS`yRUDgHM7vAVL>LO$u0yzzs$ySr+KuT1f{FDW1VBj?sG%u8 z)`GVGqQAVxSve#N`XE}PzW<_MFRvXc5;(pt;0azRzm7(zb_n~U79!rh#)Jv_1Vfbd z;80=$xzc*$|8GuDw1H)-FtcbzuvIgiAc-%^SbA;gg(Xmp?NE3sWjw^ZF~QRrjVSh` zaFUeq`!LEn#?}<*RSFCT!EUgli_P5CW42=pTqao|!#afSsrQORdgW<{4vn8fDhF$4 zh1T&en1*kehGwd5_%W>#fY=l*$+7gp~{_eTw4Pn(s3f75uYW z*w}5l=oOuj6e2Y^8^Jn>&6^YT0!)5-A3vXRBBm^khG@srO!RL=YO}uj@gu>Jg^ZdZ z0|vE2661Lx?esllB&_1qK)n+=c=k9=4%DuzkNC@?(1TMOgj#l?h)ex_^uda3Gs59G z7yO(`wGD|p12(iQ$B1VupRnklsM;21qtBqtv5GdwXDroL0uXbx$r(rE|sgqSNw> zOXsAh96Hc&S|DHzHO`4c0c8lzl|)DZM}H88p?@Ew5uD-~f8w%=JuPVz^c-WQ2{OFW zeupG-s9}d(aS}MaJJ3anv%2b;kv#w!^zD(6+X78jw?NaIv;eR4aby`-RwZvjQ<~t| zIkV;j6cJ>vms*hpzfiT8k2#(lNN!l%C;O6zOL49`xE4Dwcr{`R}?l zC+3+=@p3`aXlSwryvA8^%vgF1YoVo)lMR|PpQ}aTix2Nm2S@=nuR?@0hvz`JEZZuq zl1f{%+_NRF(N?EH-1HeBs0yE3$N_2ow4fixzA@|{%L$Gh3>*MQbL%^)J`0^#3oBvIyT0}z zw}O^y1nSr@{0<8AP94~|CWz99bZSNiee?G4=)=j|)AQ#~HZJlJ38fOEB=Y^zP5%12 zFrK5%!fbR!`3?%u=g-Jz=oi7#49n^B&UGL%5<(!ic-Ma)*z5qGJJ#SW)@;VQDd<)< zYdtwWsr5ru7Wdy9DoFwz5faN>1jRwNdS`xohWJzxPMLMjeqwMOJ}Jl!gGXXPsw4P{ z;phh(D~XaX8%2bWVhBTYEFl^U>;c*{GofiN9ZARqiH=XKY+{|J0?#>R`UlHFAic!W z`dC9h)%N8~#HTK$#Vkbr<*i2XX*r5#N2fm@zi1gq0DtFUybX4hv447Phdg%Z8JNrk zYV>LL`DtU|%We&?uN&hDJ^%c9!CWZPxva6APu?DWcysXjs75?63*W;ch*76K=IQzR z05zCCP2mN3${PUpSw=pu1 z_|jpt9dp8?$fhv%$BbWM9^C`!p4gGmNn2o~(@2e-W#4veD=BpXqh_me^?ZLt6qm{R z2S=A>exae~8wX|K1Zz+wP-+kLGtWmysmM7gwbL-BB$bt*Fky6QA^&}tk~kw=1Two_D&Xbv-(Y8NcTko8w)=Z~JCE|; zLp;~lz3pw_a^jpbZnmdHCi` zl~rs>38R~VcRa2{k(Ya}PFJtJWlOht$QuLR-wJty?fR518Iz^9!}aTlhAj~neqUz* zlqg~Tegd5tK3{)?OB@TbXdv~yS>suGc|+uE`sY6i9}+-UVChEJpKtAdR%W3yylM?u z+YHuDhc@U}moni->kl(jE62FrH<#@CZyTBa2~3De#OVB~gE}8NH(z8r*RuABx&g|% 
zg2s7D;v^8$9&}@{n+7WrIDmvF@<4 zhgC*U-#u$0t1(sbO+1t{$PHLU1*v@)4u35O5hOK9OgU;3Ofd-)65=z2qIm?NESIt+#;XzPaB%nF5 z?CL`$w0&vCGSFPTcf-zCSF6Q;7XTV@;j2N_(^L636 z@z>{3PKK)+5He%Y*Jn{>Cc(A1)Ux6a#i&ZUHJMcT?U6x!c?P9N%DU{jp2{sFvrwmN z_#ih}%99UU=!wo)H++ts6pB=%W^JA8I)#Q+@%k-47cX_0()+h8V&ApUV;v0Ze#;Mq zCLLERmyXMBJFYu(y}4?QmFxFw8~9f|Yn=a>;55!+a=YswSKj~I->aSf40iV(?|*!d z$E*pQMreclTAK4lIJ8D0$#b$<`PzD5Y|3e?Tj>G`kSggCgNyIAp>p6Ew@5v~PFYMm z*Cp}>rn>48Y(ljEeM7v}IloRTqQr!qWu zN%y?M>EGGi>W5~kaxTb`NvNaB)X-d2)YUTY)H{c@n@s_(e%F1d=GMAlLv1HiL5Z(P zoqsDQx5r)RuFo3$zeqZA+sojW^Z%W}_I};}YtVo8=>PQ~kISlYmWh%pAHuMo5qWPi zd3Cj~cmD1!_Iut68cq;x*>!Z$#>IWAu47(pfoyqcMj&OMa8kM;oyzHz)0uU<)ASH2 z8{fB9khbgG-L0ghzygZY(xyV8X42FfkOLJ=erOXCSMJPcU{K8*g=`2yb3~S)@qJmX zf(k;_8FjZ=<+Re?kJ7uQf^Xf;W(hLECI?+2 zdbM_FX-=oI-ip*uE65lwAj%Wxrdu?8Ud-C|DF?wPtzj+8wG?Mb6$wcy`3F$(m~FU#*Gx#32td`HC_ur$NS9I2O;#oR96r#9S1m#9UMg z)%1{BKF~H=RX}sbWC2_KXalZ8hhOMR_fhBHpAUlFpzrrPXd`4eCL%1|fUQ4l!>^>x z`D77Rc|>M`+Dsh8AP{BcheU12%xXEfgd%fBaoxeC)wHz90iJUQQB=>PpsMHJUMe9ga zG6FS-e8a`1X4ZfdjGGoI5==K?D|)Sq*TJ;ra9TKDT1wXeW=$!s6;#$P6q>Fz zl0_|qQ!iQvRnIWB5Ky&X6-+aOR{1PlZqG+N`a(MJapR7vD04xKh1ZZBfErMZETJE9*P@ z;-ar>5~K05nm*H|yIEH>Q5q`<3Bm!~-K*+|d;_z&vCf0~bZFQJ_f<8eqWhF-(aE|AjXd|e2WPd7L z>SHS8X5)7g>*6l^0%v3RT?0u|^2sLZ)NE~?(xq^rt*%qEB|CMNS(1HMnA%7PS2M<; zeNgsk(-pP)OU|C`tJRuKo3*``RLG%l)8@URmBN17HaS<-`DCfG%D@U;Z?gx(C=$PB z5?+*Us|7%w3CRggo;PwvH8q-Eqc)JwFA^>Js*&eTb!3lS7|^T#wM z&wE6Mz1nD*0zIc3c!k_ZyljWLL;zWgh*N3VSxc1^%THdGs_c~j@mkwM)Nz_@4F)b5 z%rfvcQup)c-ikgr!MmS74_6eHc!EBE{;Qbc?Y;d~g>@G@bzP!idi@>sywREZuG(rw zUe6}n*hU-H=jQ2UyA65`i;)se+wB}v^W_G%>xJiCi#vW(#`w`f<@C4Cn)bv6Hb859(i!lo-?F>+L>tBos;li)drQqdd+Rd7n;3LlbIuB2 zkH(&RHuf|+Szmjz6^FHpPYM@?!F+{-!&Nh*isGFQ#GT;kn>Da#elL))8}3JRj2y?U zW&rt$0P_M?<1*z+vuo-ym5BEnMBOjIUg;leI73yg-gP*q^S)XbuG_5@JUKqB7J^%L zYlV(aUR3d@ETrO|(4t6jIcSBLcXeSau0Slyl(!<@YfF`j3X5X(3kmP9y~@SrWx3jY z=QnlAbFLgLOV#cnymcSUEo#GtR$DWN7q=``*q1rOs+4UvN-wSN7QMOQcY)D&X^X4) z)Sc|9>)6efVOQ3(rP{U)*G^i=W^NvR_g$McbRFJ&R~zU$G=M9K4k@qFAR!UWlOcL- 
zFYCE0hF@=}Zh>>VHFg~|i);O=c&@5=8@e`4vFjkHt5vZpuM)fkjyLf`Sld*yii|f7 zMANO++PVJS;EC1>+AZ2uH0{@CXw&^HXn3g(?CD&#^1=Z#`;gS zQAJ-Se;XNKRsN6dem(wsyFb`{T>lU8)b@Wf*4p;sDh94BU<%Q6(n#g;#;yNNNC;2X z2pqTDHNOA2adXX=uI<+ahsN71?XQeQXAcym{;1FWvpj3? ze)R{L)udje~COKEgQ-Laz+tVdkK!KsU((fDy`efFKh%bfEdNdwBMG} zS%H>?RqkOWITqy}UJB9agE=H!kv(Ni`dKJ$O6YQ1YoXwNg@P5|r+eB68@7RBuC;8% zkT|;B{BJ1=tvlJI#NqU{S$#&Fsl2;!>X$WJ&RBG=lSn(Wy!P_OZ7cZtR;R*%<;qlR zT3^2@w(}?m=bk!@agwPd4Seysvijj()B^2wi69yuPotH~|5Qrf#wn=jB7p zMSUyaX&WYuPj|w{c=<}1ZCB(oo%v!NQ61`*0LL_zgrDj=)GHt%BiM3?mC6oGxN-TE zM3h6W(3>J!tAC4Ku>I?-rnAQ)c+skxi0fR z7jAh@?&r$8Ok_vK60i}xz!@IVm`X}SaBy;L6nzo=h=o%8Vj9kSv%!c+Jm{T3?gH?H zHPLIErLylPNM1X0bki)LrsCrXsjF-SgL;f{)Kvd~VMFZ+-{B%JaWL-Tu3?-+%P~d64I; z^8fq2C*Yp8fqU5n?tWFkUHele@mE{Kf;9ejGpXPGVD@nr`r6NW{QtI|pZZzB|F?JR z{vU&9&j$TR{{IlqKMnt{)+o;btM$GKmy&hvBCrJi0#^}aoX%)WCL|haOO1z67wKEM zL&k{-a)O8$5qPzrCm%g6eEy%>H#Rh-c#(0Yb{`TIZ*=HF@iStV=_TgT!O3x1NzDZz zNowLxUc{It#aA0KqFz*D08M6}B8t!Usfhh^3Y?VwGd@aaIz-lsHo;dzw7<8vv!~Ro zJUtLU31Uv)y6A*(xNKEUNf{WABvU2zz;VW8Nm_xhK$j$r0|213!XbKks=ywk^QUm6 z`HH6bm2KdBN`+Yi2n(U;t#8+5kpjjE$^}g)=pf4uc)~d1SWXoRQ=Fo+BY?HItvt(O zl8{v5*eIoL(bu_cvC=}#{-3Qut z<-3M{-F1t$2-R~H4bi7R0n=zbN3|BV11UKj<y`Hr}+J#8PKdwSO3|JEJm);i!S{cnF~yTo=C9Z$cew@$g^b;~(GF_3|O(1{RDe%?@)MpGh2ERD-^0_7W#FrbM6P*K*smRWEBa z(J|lVEs6M+fSQ+iKx!xRZS(UAZikA`IUbK`=%x^9{nU!YLLikv6 zR*H3uY4`Y~v)L#S+HbnS;%7CjUR&voS95aYYq&MKc_o`^`P#F^nLci;=;a$00o)GS zL>noSXag(*jnb(px4DU&EUWfyE7tbo62BP<4x$Sol%Ff!WR06!){Mrj`Xt}oA9(kx z7{u4#|FO9q@c#9`-Q9-&$6)8t{`(-$n)ct?6_Ms^{jTr2cwi5jZYd?KeR1?1w$DWW}f_k%o(jr=pr>1#*-{ZoYgCI5tRf63~g zzZ$<-8TgkhiuzlJHeV!I><;0E$?quEaILv_|6zdS4gq*>B;69?*W&Ni>F8I9eM_2w zZ!c*~TEpJdhdYJSBwsVCE%~wmUCEb?=1LwailPKP`Cf=>PoEw9O5Uf=CfcxU#7>kG79Erm z#pn+Whrd>EMhp144bnGp*wjooX(aOoQ*NgwCm~A`l9o5}x^HeQi&>unb4mi$or_;| zIFgmqiVr#gaixQcwwCr@C`g>eF^Nu__lUH7yx;9^6;8&m)Pj)ya`PR&y~PAyfuEb3 zdbxb}5PeQ)+QguEcWVsvNs9@9sCv8LEE7HB@}k%JAy%s58M%8r!kV4Vvk1$|wJPqL z8!n;WyAZQ*{|}}lPUs|MoZPjI75jgGZ?ISM|KH!)e~kZnkf)1Iu#|+S0!fDSc%#cH 
zNzo{$F}UAmIK03UA_C92DT_RVlV&8QB#zM}W+QDRq3L7`v=m8a1Txb&?{OM=U6c}Y zHSI>m$(UY|NH5uc+YAs~a?4VvsZc^0;V7ml3B2I&?8BL4oOoUIf+Yz{(a$f=P(-=# zf(ey9_*bLv1*6}15B{|;rjwrf$Nm(vv{wQe;qW5Q(3r+Vc;5x$GV{I*M)<<}E|5v) zefNL8F8UdB%5s5@508Ww81Hs3pb^16U5>L~evcer;rc&4IyiiN6eQ8z+gP#w_jjMw z(G>lS>Ght)`DCIL15+D?O27&w<09}pU^C$qbBRWbNg+AT z1i~(^?<&Z?-!NCGPNOUIK>mf1JCPr(cg$fr{#^6$LW}PfB84xN0MC0 zn5NQ?r~qo{``=j1CUfNb|L%SK_%URuU@`Gj6NrS-496&*3U6i%r|dz>tPA zK-LHH3J0o|f>ev1_u-5X^se~U=w~ltp@1Lo&j(egw+XXDHK-;=#KIgxP9W0tL#@AB zD}*j~yXZI-62~!mp>XmVou=mVyVIjrM+av|AKo0iK6>9ug5DyA=Jrc_gy;G~sP`LP z`5XkZXnNd{AH1tnCEe@f}{KIdtBuXR6QfpC(r84*YoQd6>=#;WOz zr6iRCVIf#Zv9voagfPL<0^CRp6(Gij!~_cxRJvZ^v9j|;$#uG)O*jVWVo~>@0J+nU zBi}a|H72zIzq(Jl-0B{6(N1usM5x{1%Ja@7=2G=;%vpkF<|K4C=m-5RAoWLuXu(xK zngOB0WiZZgmO6f9Y!4;o)q^|0KV)f0GAVj&M)-`9%bv;C?W-B!GaVV;3qk(zHNf6x zfX=6cld+Nj=PXB4JR_<{dWrx}CF2~GcMkK-qsDpXBpvGGT!=b6tht;!8Gk1x)Y^`Ga4jO zw|YLhi5N%k%7nLHvgI-z>*f~9V6h{Tik!nZTcXP;p&Wg*=ZPQDn36aW2!|o#YR$%T zRUD!d$)%Z(fnEeJ)qLO9D`7{igcnn|T|yPs8NX;c8Ny6~Yr|{z)tOjL7%a(_l=Ezs zw5SO0)E9TCFtLZr8*u0lW7+;0Z5a642l5+5@J9e$aE=Y)&+t zEMONjm7Mip<$eIx(G5#@zMK-h*ZO+RSe~Rdn*g0NBnSZ(UhGP0V`Q0j76XU7Y19ntILxUw!GS`aTGEo!2pLFf!vMY!*TNIk7!$<}6oC7NjE zpCgXb@V8#APET?|dI?rU=-K)`i@1-A2td92CyE*9r6_{mqpsifEY$x)T9prcho9WA zjaBx)K_mWau=jZX)5AQz?|XWjOAn_pV?#8;+(T%De;cBww=l;){Zo@oBulNf3KPU?=k-8VIHHI z54~#m^q&X)ZO^%Dc>8g+f1A&;^>60Jon?TP>wmwY|3B+L`u{%2qxOG?2e^){jwE1Y zfnW3d>X8wAQ_mXf|IT8-YW=^F|8HmSG5^DZJhk=je@iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBhciT9UXn*Ffs4I2fwtFqgkF+zZGkwpxV`tiD635qey6>E9 zjvFFN5@Hfy1E6h9l6&@V?BCm8vIh^6;6t+fh%?y<)!BbQ-%;n&U zvY21V0bXZZC=|nIGa^(tCu#D{y`Ig@&CT5>Pt5cFyTCBU0DuvV zX;Bu~pG!m|kUSR=8n|~KpPFYl0+MA3jfkdia9Pp*$8alng!x}4|Cc1mQQltxESLZ7 z-QA}<4f%h%z4=xCALZ%8K2AuUD6q5L5b73Fgh3z9W>f-|K;UnCFJBBMT%<%PjA23( z)D`wIN{B$XB!ZH0f)W&mF+xV9#26@39OgV1pfJr6qELoGKvOcsQ2-!tN~IF>5lm;1 z2q`yZ8JARXUAj!VDhUPX!>daag2uZ48G`8m6q&-+j7x;r4V+4N{~j_ynVLZF@;ljs z(3-xzQX9g@j{)emBk1)4fGkgvW1i4xK7zx^5m(0oB{CHN#B_>MnR-FQ*@R9<@L>=D 
zL_A3_Qe3Me003l~Ht`6)f8$V*Z#Pm*xtMQ6+^Meb9bu01Bss-Mppqka^ET+iz-g_X zc|Xt?4Wy~jGYYI2yuoCWCrPU?m?nHol98d+8HX7r8ALqIh@xYfP&J1us%F5|429MC z60T-6nn5lRZ0oRzUTDDWB?-R*q^2ohK!hN3;1hW7lxx}GBcvpguy*k$Igo0e;Q9ve zdK;+(Wm5@jDHo_QOgYoormlhQ(jN^#i)yaCAX|mN*1|X2<_#CO0^bF=4zhDX0s0={ zG9r+vg64_?cN14i2h6xs8sP%qNhvmZY9)$G8X0-AX69LvONHX_SaUBIY6Snf^(T!D z<1rrV0{7y`5Fte+Xe0vw!WdV^JjoHf{}=>)*o$MT3rGUUE~(%w)!2ICJ|haQXp(qR zueg!Im|)78L=_;rGywa3Een?;^9R7Mdnbo`KfgE_S&b9~mAq>dQ_i#=Xo%2+Boa*# zDRFy2#+X=~^$SfanUToxzAk#l$EH$M(uWsRD$R2lk0CJ-H0C5W18`cV^f5d#>9)&V z0iK~Cii^&&al*$#?Xe8^d6a8%8j=r5M#D7jyXka;H{E4CJ|kj^su<^+w{{FNnqlF1 znSwEqG}_D6tfa*li9m72FOWT_30h*RL_Tx+N>8&HQeSJ^>-lSujNtXj3r!eei|icB z8PAhg&vLHkSQ`%+36fDZ4IKoFYzd{Lbfuobjqar#9Z0l#(g#qKRSL|*aZf1{%?u~& z86h!IL{C>tWuyg1bDtrV4B-U-$f-aH$Cc-LxgGBO7;XY#F_1C8#2W6zK!54rT3T7L zDfrG52{i1Y6_pccaO`o?@rE*m;*uoJo`5AAp3xL}u12u6DZLqRh=y+d!^&%iz3NX* zeJx@Qa-N{|R@VwEqe;_qp!7sVTp*W*T`#wK>vquIy1<*VQ<7#0zMv^p+I~`o5;7D6 zi#;Uf1!utWbd192Zu2GvX<9Z^1BoKc6vj1r%c_qNY?fU+X0LTksvh;g+r7GJU!hl3 zf%khoUOKo!L3BYk(?Q&5LU_fJxz=b@`#&}2vt*XAt=C(Y2^n5#SIa}s^t2Lo39_j+ z+9h^c@jV)f+IqY&7~Bn6qw^mP+h28WZFKu^%;Q3Q3SO%Ora%M>L*aZLfs^nP+JHq8 z8&=2~G+s-MBk{+OlDVkUodH)jP8 z3M+^#&N$T|*0we`H~(cF#yF!|nS#b8DOHlC)7cDt0Q76?mFL1x7t~Dfd^&5AYep^+Vmg_i z@x&~9TKu1Dq?uHNMY!SUWtf$5rOE8+VA~D|n?u6c6ovDjQ#zd~1B^^27%9WQ|Mvy_ zkz1^-xzku1Q7}+K*^WFv(9&lEFr0>3GL+>(JAAXItokgCIS^^2h%!Hfv($_0LxJE3F7h!BJVBC* zL>I2saosxtB?yxf6tYk>d}2oX*L;kEq0$ZkkAX8s1#1q%g`Sq}#k0e{aa|~>U8dDS zDt#eMr;jXQL+A7s1N;%(<$akEfV9E5g+@Y2f0J3h8X zNlRp%0iC#HS$I)SOi1}qR}UBZ?5D=#)jC!2h?o7gz0txo?rAMukp7q`BOuN;IhP(t zG5bV2&b^^4FFXCY@T!5BB^zO-9yBY4T+NUv8WDwN^=930znpUBOcK1*3tP%2yo#54 zZXe~tm`8GGEd^!MVNHb%`$fkK-2Fb-m8Rv*+#)ED#BLz~O0NWJ^eQ>Tn^YTJ0EQSB z5glqBFR5p<)DT*%8*0Y#QWT+-bey2p^SXw|bg!Hh3>#~yIv*G^7%PS>!r>H|ZiTT| zZ9p=rWo`S(ux~NGz?rS8nW~v+}fCT&1`KEZ2LNN z%RtcnuCdgWFIBCr-D)O;!-?@RWQGx)P>ek~|JQ%~rwdUPimX^-Y6nZN&cp77XCK_6 z6UaoB4vLX#2gi(~tNV?n5U+wjW1(AO@e5~M=whiznn^8vO7O(Er6!8#^&?Q@pk(Ro 
zxi*=N!W#G1tpK&HvK`Or9gIMU9R1Fns}RjNmo8>(mHvPI$A4CS12chKYI)kkN3E*# zTtwGb;VyXn{N~1@xO3g$m$S3u?a~NZLmGO%-7iXI+a8(r1<%Zy!+M59pqUeqGp6|* z)}9lZ=&3o)qX;p^c)bv`1J-u3I}A^U#*H}BYcM&y;^HFVB$h*sg8pZa&)ufC=@3aF>eIVxwe-l7&{Rv)GSt0?HEYlA2~`TVPcvXTEWisHHe~dC*dab z4ty^wbMC5vq-8uKf~2TWNN4N?Nu{q1T=2%~bQ=rIvc!mZ5z-jrL{qVJHM-22)}=fp z+U_UGT>H00(9nw;xrOBfuRyOZVj^PSL7h-cVi_7+%mtlNR>|5sJ~mwHBElLll|U{D zO$?h|QeqhT{9L8kd8JFw-|_LeamLCu#_Ik^RWvlaII7{7G`tiy(sNt9Flg3?mwwo6 z1d!? zRky%&i6MUE)`Gd-|fOG&tjtRRB4xZItMz@tLva%TI~p(Xfwv9ug&WK zK={Ay@-8LUuUV-?Mz9qG0*l-S>kszfH-uc~My&~qc$%Uf05^*a>nfD?2FliElzPF3 z#HfDW%&rUH#bxF z+io3?;K$9E)Q!-Z&su{m-N3KDqpKJDg?mw?*N3*nsq^zHyG)7zoU7;gsCp(8#sRp$ zO1H79J)lLm&%ETQUu71Bit~C6p^a$Yrq*D>k{yw1OhDr(+j$ z;V!JP(;BZjmS&Y$?)ONHazWMn8D|Qw4Y}Obp!G1^4*!fxbu74^bI%JDIop%3B?`OK z+B-Ufz1L^IygE5N``f?&yPQjfsm&d*DFCO9&Wv*+#w0ScWN2ooo?W$J+ zD3+-sIApDtHr=KP*TS}zeV~aqFpz^j>{SfxVlO0)v3plq`{qH$W3P>s=;!p>C{q*K zm)fQN%U=&5vZ(=(XH!99!z`|7prt`ZBtg*#$VoE6f#L(aq>*05>G|zhj#7enJLfAQ z>IHdkIn*khp!{pEm9Wq^3Pm%v$g$kc_hQ{P>?nB^*m0_=zVB{rNN3m?v}{Pw%V^hh z%&!9n-vun4@+hn}>>qW+nEcT$$-}awiRZ7xVG74t1K0|;!=3O+plLuvG{f=@ zlUt#@jKX5BW1bSqLcBI^i>s8*!Xth%nG_~4pKCF|H93(AdhPKcC8#`ti%Ja^t#AF^f@8@le-_D@P} z4@68&B+IBj)Y7ykrbW`1#&a+zGs(hYU(Nk52WAh?RqPpjUL5}Q0KS)hB;S4C>eU!J zS?!D&MP7+n#cd^eB~rg|sea{2p5jXbq(HEk=|3CT8iW61S;CkC6HEVd@uz!ZYPwhD z-|n425^K-dT96Ng2sLRpp;MDZW(CLq(aWTMKd4?d^5*_*-@GJQh2#QEWM|^tpKbB- z){!2Fx4mOhpv+F3aU3g4sd>BjQP0v_z$i9aU}vb@w9q;Zua(Kf)F35|F(SgD?oAK7 zq%kFfIGUza&ZJGivWb|k#;{gnv=u%HcMM8)?(Lp|?ZiXP>)PXy77`^N4>ZymXX$Y8 zNI4Wa^CAtdLd6%IEU}2Om!kSPS=wtQct)l!RH7jO0SZrq9iF6i5ZVeA+A@pVUm6yUeHTq+Ly}&9~(YOT*A3BbgP{({PRze;7lAc+geP;RCTXGjt?>&jMh$nIl8=BF5O7E8M>&rnbvmqlB; zOy*`s$wOjGuWoHy_!nrHY^s>u?~a0EXLKYk2!t>EA~ zGeMiiYTkrg!obxVxDY4wmK`F{?Rzk&TA1LF%epsSOZcljeBvjq!w0P~Sp>T~=1-cQ z8h;degC00o$YTAc#rT;ALGh~5lqNpgW-J-cR>9n08SZad~yGi_Q{)7%`M zPkH-yk-YQ)1ZD||5PJX8gWh@X_M&DFW!QE9?Xb3OEZQ?n(ooKZW+Wqc{~jodl005* z;GxE+ojjBhMy41K#&ZL)oMso{>uwbBG~*1l*IFd`w<~2tr~#j>j;NKx(2{VqCPnJX 
z3rD7~puY)5be>ddBU&CT)Z>Q_@JG%Sc5tnQ>BO=O%s6xv{~Jk@3IVU3r&0WvhkjkA z>-nVjMR<|WltF>JFjbRkgHEzozy1!B*9KK2gx)yLx=LtT(;^B>YQgc_j@^%3A1zY- z^=hGu9ye_A+DeqEOiZ}G2tM;k;VI#wr=I(xw-SH_Jabl^^ScA#Ov_GMnKz-Fk)l_< zQgBD_O!0(^F^ywnA-t9jhtM7cG`Ye^;<^cH0e6+#ef#I^ibGy)3pV^tj=q!sW)%Ih zhV@RXbCs~xu+W}Lskc#Ny1o?mZWG&qV1dIUqUQ09hvglq{WzpvfvTUHq^o;6_V{9F z9=Qw7xs!~sOE^vQMA1Q2sS1uspbR3Ojw!>~?d;xunPH%I%5*E-*7SWBR`92yrt0kw zR&xdm&2QUu*?{7-->t^%8Vw#}>>j|5Lu#x^klWO~D~X%bhEr6sv@w2ChmD^$Hh5a3 zuD=g00F#0tE5cf@$xHsx-Pdw~jN7%y-SQH7^ov?v&(elfmW?j=b8c>K)<}LS0ko20 zZU)s%PAOrvvsM-WtI5Z6^F0CC+Mo~NkY%|!ufG=4NI}o?Q>px%J@%UPAbjt#RihTX zkL!2v4r;)Q`4@yu5!NEkBBDBNg4YaMCF}O<0#2gv4_H(43|~#OGu2B;_Qsxne{1q- zCz}1=e>Bd-0DQ09uG1=5g$Nbl>w@;~?LyHRtgflN4FB8FGUM^*S5(zus+Dxrvg(he zt}42xY#;JjE&gw}O7Hq5pqsz{^|TrP-`e@{YyAH)p4#`nwt@@F z;t@P6cE*F$JMDM7V2KwJutvu_Kzf^ii!%)lhz$kVoj9Y|R{>K57{7%Nz$lB6DR@#@ z!dlF$NT~4g1#L}I&!w_WJ=@y>*r|(xeuWUF$p}7_Ac~Id+a{n-KL4x9f0HhCp%5~-+i+CRsJ92@#UXnnH-i%VBgI7DXRZjN??Jk)|5b3D&c*#fL?~^Celk@ z1Dj&{!W+*mM%3OIsQTYsFtWx80aK}cKHwI^f->29RGhu39S`r?RA^CEKP}2GLE}$V zvl=Spaft?qJ2ObxQ89Y}H)V)x*ReZ(y{uh*Kn;sjOaHEFb|j(SnQHB6YUMSSbvyDI ztEFWpbXrd{3aU)!A`i@+031xHz$=m@L6r{e($Q8`7cPeHp`)LOrzz1|?Tub1U7?UO zOp+eF_399tW(mC`yc!&#dhT#J$rJ2tKyR>5WtNb+*-+Vl-uKPEzjw7Pds|lV58#I_aEym4G($0i+o+< z`RUpJ>APpIj?VTDj}A`GPxp@Ye}47<&R@QIeROty{Oa)N?7Ly*C3b)*(te9{$iZ$Nq>}S!Tn0m8? zn(-DGb>d?_Q`iJ%qkrFC;CutVwWk6{KUI`rm(8Ji0Xf~O`MOM=*Y7PnZ^QmZn(6s! 
z@n97J?3mrUdM}=m)aK)mi`ur^6<=W)!P&E8?_Dc}w0hcUmtJqRc44z>v;SJIX1%z` z+HAb?rto|NzMYv`{tVU;a2BLg`jHnnn2oVw+rDkt2RM3b9EO2 zs#mOoX=f)_&r*S@H~btD)4<>h5SaxSe_EMY(COWTrUC3;QqzEScd=;zx`*5}AT1M| zmeWujI&TPA`4#^r)7;fRxb#ntbedrA+ZWa2pLSW`|7k*xZIuNz8d7P$^W{4tU8lB| zxh82X>uMp!Rn@YvUB5c>QdKX?R`P0Y9$)*s(c=%UJmY=Ov3Mui9%T%7CAr(tUX0^P z*xd@Rc6`Jgal7qew%l;%Vi^xLUM&}(#q$3paPOxU%`aB@EMUV{1g*|Y?Zl585<{wZ zS;yr9UN0*LHfqT#x5$}UFo1RV04u~2Yafg7Ot@EGuWUXL(WrQQxW6PiS$;C23LlrB zgz!mq-zaE#zP_qc9l3wdo@%Gy%WBQj?hn_Wcly1m)i@G==iaOuTo~xB#bw=2b6s2B zbC;s}7gD*C8A0$g{k8_KDuwN!)L*GQ~_E~c59?^97nH^rFd=?Gq$PXavvhF=s_ zyPd67(7cIz7c{&JUE7}4DhS$Ub?mCI7OaTl=iZ|J`h3D?wfqk+weSuJAUEWH?LK+3 z*~tIc-rU{%n*Z?_&q~>`?v?>C2j8lE5zl4oZs&a-|3h`oYvIi^8otdv@ou#EyvZls zdCH$C52bS(zLTzv+Y$Wci?cR5(^0C=>A!zpO;Ptr$=(fU6|ih~b&4#?OKHuG4T0N{%~ikJ{vx7lwUsk7TMu6EtS!}R5UGrw%>e@p%mc(L`B`3ge&1N8U7r*;p7=cSns$F1bKo3<6e z#>EUCNb`NvtXU@6Rnvk1bvo$ass|1qA$`@hZg z&q|xW>3w&8>lgfg<`w8a%GPiC54i7}-n{9%^xJuhLAG~&SE0=(*(SWlc60h=?KQ8& zehHiNZH;DladY}L>IS`Y0F*_@e{8YRe3n2VIdAlyGaKULgZZRoBV z`SsZ;#8k*2oKiJ3|625eaQt^MH2)R_v*}R(Q~Z>dY*+yrljtJPz+ON1LnyDZ;D>Nb zE`lFIm1e;Y|3B!%uS8IuOE}y=kb%~b?=Vt9NMj^JTTk$JU!v+-CjTb~d;2dB!Zf~T zA2;p)KHY5G|FyNdz58|j|0qx2zlr|$-`w@O2aW}P4+8IXc^l}23z!iRUlCz%GJ17d z-av`wMrm`IQ335juO}0^0at=5g$yWzzi=z}S&~nuWSpP_P~8bB!yqu~#@>fC=3Gf7 zNG5@Jo0ssWjr7~KnNrzkIGj>7%f}|j;PH9H(==yP&4-TT&x?VrSJw#xXF%Xhb>mkF zw6;E8uP#q6uOWZ)3nrku*Qd zKj*sEaNY2O`@$N;JTlkU8Z955n?W8jsYsH5xoV<4JnCM*IXQT7uy=ZJezf=U;BDt% zhfDO8KZx43%5T^D#c#@{>oCTIUuo2AT_1-VN9^jYFzDBeE+2Nzx(9B@VOU>a$}drZ zuB2JZyMV5|pGpFel1J2BhHqr5xE8=EF=T!="" checks) + {{- range $i := .Values.cadvisor.metricsTuning.normalizeUnnecessaryLabels }} + {{- range $label := $i.labels }} + rule { + source_labels = ["__name__", {{ $label | quote }}] + separator = "@" + regex = "{{ $i.metric }}@.*" + target_label = {{ $label | quote }} + replacement = "NA" + } + {{- end }} + {{- end }} +{{- end }} +{{- if .Values.cadvisor.metricsTuning.keepPhysicalFilesystemDevices }} + // Filter out non-physical 
devices/interfaces + rule { + source_labels = ["__name__", "device"] + separator = "@" + regex = "container_fs_.*@(/dev/)?({{ join "|" .Values.cadvisor.metricsTuning.keepPhysicalFilesystemDevices }})" + target_label = "__keepme" + replacement = "1" + } + rule { + source_labels = ["__name__", "__keepme"] + separator = "@" + regex = "container_fs_.*@" + action = "drop" + } + rule { + source_labels = ["__name__"] + regex = "container_fs_.*" + target_label = "__keepme" + replacement = "" + } +{{- end }} +{{- if .Values.cadvisor.metricsTuning.keepPhysicalNetworkDevices }} + rule { + source_labels = ["__name__", "interface"] + separator = "@" + regex = "container_network_.*@({{ join "|" .Values.cadvisor.metricsTuning.keepPhysicalNetworkDevices }})" + target_label = "__keepme" + replacement = "1" + } + rule { + source_labels = ["__name__", "__keepme"] + separator = "@" + regex = "container_network_.*@" + action = "drop" + } + rule { + source_labels = ["__name__"] + regex = "container_network_.*" + target_label = "__keepme" + replacement = "" + } +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_helpers.tpl b/charts/feature-cluster-metrics/templates/_helpers.tpl new file mode 100644 index 000000000..36b79ae92 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_helpers.tpl @@ -0,0 +1,17 @@ +{{/* +Create a default fully qualified name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "feature.clusterMetrics.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride | lower }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" | lower }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" | lower }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl b/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl new file mode 100644 index 000000000..2231d0e9f --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_kepler.alloy.tpl @@ -0,0 +1,77 @@ +{{ define "feature.clusterMetrics.kepler.allowList" }} +{{ if .Values.cadvisor.metricsTuning.useDefaultAllowList }} +{{ "default-allow-lists/kepler.yaml" | .Files.Get }} +{{ end }} +{{ if .Values.kepler.metricsTuning.includeMetrics }} +{{ .Values.kepler.metricsTuning.includeMetrics | toYaml }} +{{ end }} +{{ end }} + +{{- define "feature.clusterMetrics.kepler.alloy" }} +{{- if .Values.kepler.enabled }} +{{- $metricAllowList := include "feature.clusterMetrics.kepler.allowList" . 
}} +{{- $metricDenyList := .Values.kepler.metricsTuning.excludeMetrics }} +{{- $labelSelectors := list }} +{{- range $k, $v := .Values.kepler.labelMatchers }} +{{- $labelSelectors = append $labelSelectors (printf "%s=%s" $k $v) }} +{{- end }} + +discovery.kubernetes "kepler" { + role = "pod" + namespaces { + own_namespace = true + } + selectors { + role = "pod" + label = {{ $labelSelectors | join "," | quote }} + } +} + +discovery.relabel "kepler" { + targets = discovery.kubernetes.kepler.targets + rule { + source_labels = ["__meta_kubernetes_pod_node_name"] + action = "replace" + target_label = "instance" + } +{{- if .Values.kepler.extraDiscoveryRules }} +{{ .Values.kepler.extraDiscoveryRules | indent 2 }} +{{- end }} +} + +prometheus.scrape "kepler" { + targets = discovery.relabel.kepler.output + job_name = "integrations/kepler" + honor_labels = true + scrape_interval = {{ .Values.kepler.scrapeInterval | default .Values.global.scrapeInterval | quote }} + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.kepler.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.kepler.receiver] +} + +prometheus.relabel "kepler" { + max_cache_size = {{ .Values.kepler.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if $metricAllowList }} + rule { + source_labels = ["__name__"] + regex = "up|{{ $metricAllowList | fromYamlArray | join "|" }}" + action = "keep" + } +{{- end }} +{{- if $metricDenyList }} + rule { + source_labels = ["__name__"] + regex = {{ $metricDenyList | join "|" | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.kepler.extraMetricProcessingRules }} +{{ .Values.kepler.extraMetricProcessingRules | indent 2 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl b/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl new file mode 
100644 index 000000000..46ff16df6 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_kube_controller_manager.alloy.tpl @@ -0,0 +1,68 @@ +{{- define "feature.clusterMetrics.kubeControllerManager.alloy" }} +{{- if or .Values.kubeControllerManager.enabled (and .Values.controlPlane.enabled (not (eq .Values.kubeControllerManager.enabled false))) }} +{{- $metricAllowList := .Values.kubeControllerManager.metricsTuning.includeMetrics }} +{{- $metricDenyList := .Values.kubeControllerManager.metricsTuning.excludeMetrics }} + +discovery.kubernetes "kube_controller_manager" { + role = "pod" + namespaces { + names = ["kube-system"] + } + selectors { + role = "pod" + label = "component=kube-controller-manager" + } +} + +discovery.relabel "kube_controller_manager" { + targets = discovery.kubernetes.kube_controller_manager.targets + rule { + source_labels = ["__address__"] + replacement = "$1:{{ .Values.kubeControllerManager.port }}" + target_label = "__address__" + } +{{- if .Values.kubeControllerManager.extraDiscoveryRules }} +{{ .Values.kubeControllerManager.extraDiscoveryRules | indent 2 }} +{{- end }} +} + +prometheus.scrape "kube_controller_manager" { + targets = discovery.relabel.kube_controller_manager.output + job_name = "kube-controller-manager" + scheme = "https" + scrape_interval = {{ .Values.kubeControllerManager.scrapeInterval | default .Values.global.scrapeInterval | quote }} + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.kubeControllerManager.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.kube_controller_manager.receiver] +} + +prometheus.relabel "kube_controller_manager" { + max_cache_size = {{ .Values.kubeControllerManager.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if $metricAllowList }} + rule { + source_labels = ["__name__"] + regex = "up|{{ 
$metricAllowList | join "|" }}" + action = "keep" + } +{{- end }} +{{- if $metricDenyList }} + rule { + source_labels = ["__name__"] + regex = {{ $metricDenyList | join "|" | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.kubeControllerManager.extraMetricProcessingRules }} +{{ .Values.kubeControllerManager.extraMetricProcessingRules | indent 2 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl b/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl new file mode 100644 index 000000000..679ebf1aa --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_kube_proxy.alloy.tpl @@ -0,0 +1,64 @@ +{{- define "feature.clusterMetrics.kubeProxy.alloy" }} +{{- if or .Values.kubeProxy.enabled (and .Values.controlPlane.enabled (not (eq .Values.kubeProxy.enabled false))) }} +{{- $metricAllowList := .Values.kubeProxy.metricsTuning.includeMetrics }} +{{- $metricDenyList := .Values.kubeProxy.metricsTuning.excludeMetrics }} + +discovery.kubernetes "kube_proxy" { + role = "pod" + namespaces { + names = ["kube-system"] + } + selectors { + role = "pod" + label = "k8s-app=kube-proxy" + } +} + +discovery.relabel "kube_proxy" { + targets = discovery.kubernetes.kube_proxy.targets + rule { + source_labels = ["__address__"] + replacement = "$1:{{ .Values.kubeProxy.port }}" + target_label = "__address__" + } +{{- if .Values.kubeProxy.extraDiscoveryRules }} +{{ .Values.kubeProxy.extraDiscoveryRules | indent 2 }} +{{- end }} +} + +prometheus.scrape "kube_proxy" { + targets = discovery.relabel.kube_proxy.output + job_name = "integrations/kubernetes/kube-proxy" + scheme = "http" + scrape_interval = {{ .Values.kubeProxy.scrapeInterval | default .Values.global.scrapeInterval | quote }} + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.kubeProxy.extraMetricProcessingRules }} + forward_to = 
[prometheus.relabel.kube_proxy.receiver] +} + +prometheus.relabel "kube_proxy" { + max_cache_size = {{ .Values.kubeProxy.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if $metricAllowList }} + rule { + source_labels = ["__name__"] + regex = "up|{{ $metricAllowList | join "|" }}" + action = "keep" + } +{{- end }} +{{- if $metricDenyList }} + rule { + source_labels = ["__name__"] + regex = {{ $metricDenyList | join "|" | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.kubeProxy.extraMetricProcessingRules }} +{{ .Values.kubeProxy.extraMetricProcessingRules | indent 2 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl b/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl new file mode 100644 index 000000000..1fa23cb36 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_kube_scheduler.alloy.tpl @@ -0,0 +1,68 @@ +{{- define "feature.clusterMetrics.kubeScheduler.alloy" }} +{{- if or .Values.kubeScheduler.enabled (and .Values.controlPlane.enabled (not (eq .Values.kubeScheduler.enabled false))) }} +{{- $metricAllowList := .Values.kubeScheduler.metricsTuning.includeMetrics }} +{{- $metricDenyList := .Values.kubeScheduler.metricsTuning.excludeMetrics }} + +discovery.kubernetes "kube_scheduler" { + role = "pod" + namespaces { + names = ["kube-system"] + } + selectors { + role = "pod" + label = "component=kube-scheduler" + } +} + +discovery.relabel "kube_scheduler" { + targets = discovery.kubernetes.kube_scheduler.targets + rule { + source_labels = ["__address__"] + replacement = "$1:{{ .Values.kubeScheduler.port }}" + target_label = "__address__" + } +{{- if .Values.kubeScheduler.extraDiscoveryRules }} +{{ .Values.kubeScheduler.extraDiscoveryRules | indent 2 }} +{{- end }} +} + +prometheus.scrape "kube_scheduler" { + targets = discovery.relabel.kube_scheduler.output + job_name = 
"kube-scheduler" + scheme = "https" + scrape_interval = {{ .Values.kubeScheduler.scrapeInterval | default .Values.global.scrapeInterval | quote }} + bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + tls_config { + insecure_skip_verify = true + } + clustering { + enabled = true + } +{{- if or $metricAllowList $metricDenyList .Values.kubeScheduler.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.kube_scheduler.receiver] +} + +prometheus.relabel "kube_scheduler" { + max_cache_size = {{ .Values.kubeScheduler.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if $metricAllowList }} + rule { + source_labels = ["__name__"] + regex = "up|{{ $metricAllowList | join "|" }}" + action = "keep" + } +{{- end }} +{{- if $metricDenyList }} + rule { + source_labels = ["__name__"] + regex = {{ $metricDenyList | join "|" | quote }} + action = "drop" + } +{{- end }} +{{- if .Values.kubeScheduler.extraMetricProcessingRules }} +{{ .Values.kubeScheduler.extraMetricProcessingRules | indent 2 }} +{{- end }} +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl b/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl new file mode 100644 index 000000000..078ea834f --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_kube_state_metrics.alloy.tpl @@ -0,0 +1,57 @@ +{{ define "feature.clusterMetrics.kube_state_metrics.allowList" }} +{{ if (index .Values "kube-state-metrics").metricsTuning.useDefaultAllowList }} +{{ "default-allow-lists/kube-state-metrics.yaml" | .Files.Get }} +{{ end }} +{{ if (index .Values "kube-state-metrics").metricsTuning.includeMetrics }} +{{ (index .Values "kube-state-metrics").metricsTuning.includeMetrics | toYaml }} +{{ end }} +{{ end }} + +{{- define "feature.clusterMetrics.kube_state_metrics.alloy" }} +{{- if (index .Values "kube-state-metrics").enabled }} +{{- 
$metricAllowList := include "feature.clusterMetrics.kube_state_metrics.allowList" . }} +{{- $metricDenyList := (index .Values "kube-state-metrics").metricsTuning.excludeMetrics }} + +import.git "kube_state_metrics" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/kubernetes/kube-state-metrics/metrics.alloy" + pull_frequency = "15m" +} + +kube_state_metrics.kubernetes "targets" { + label_selectors = [ +{{- range $label, $value := (index .Values "kube-state-metrics").labelMatchers }} + {{ printf "%s=%s" $label $value | quote }}, +{{- end }} +{{- if (index .Values "kube-state-metrics").deploy }} + {{ printf "release=%s" .Release.Name | quote }}, +{{- end }} + ] +} + +kube_state_metrics.scrape "metrics" { + targets = kube_state_metrics.kubernetes.targets.output + clustering = true +{{- if $metricAllowList }} + keep_metrics = "up|{{ $metricAllowList | fromYamlArray | join "|" }}" +{{- end }} +{{- if $metricDenyList }} + drop_metrics = {{ $metricDenyList | join "|" | quote }} +{{- end }} + scrape_interval = {{ (index .Values "kube-state-metrics").scrapeInterval | default .Values.global.scrapeInterval | quote }} + max_cache_size = {{ (index .Values "kube-state-metrics").maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if (index .Values "kube-state-metrics").extraMetricProcessingRules }} + forward_to = [prometheus.relabel.kube_state_metrics.receiver] +} + +prometheus.relabel "kube_state_metrics" { + max_cache_size = {{ (index .Values "kube-state-metrics").maxCacheSize | default .Values.global.maxCacheSize | int }} + + {{(index .Values "kube-state-metrics").extraMetricProcessingRules}} + +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl b/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl new file mode 100644 index 000000000..d605ed0fd --- /dev/null +++ 
b/charts/feature-cluster-metrics/templates/_kubelet.alloy.tpl @@ -0,0 +1,38 @@ +{{ define "feature.clusterMetrics.kubelet.allowList" }} +{{ if .Values.kubelet.metricsTuning.useDefaultAllowList }} +{{ "default-allow-lists/kubelet.yaml" | .Files.Get }} +{{ end }} +{{ if .Values.kubelet.metricsTuning.includeMetrics }} +{{ .Values.kubelet.metricsTuning.includeMetrics | toYaml }} +{{ end }} +{{ end }} + +{{- define "feature.clusterMetrics.kubelet.alloy" }} +{{- if .Values.kubelet.enabled }} +{{- $metricAllowList := include "feature.clusterMetrics.kubelet.allowList" . }} +{{- $metricDenyList := .Values.kubelet.metricsTuning.excludeMetrics }} + +kubernetes.kubelet "scrape" { + clustering = true +{{- if $metricAllowList }} + keep_metrics = "up|{{ $metricAllowList | fromYamlArray | join "|" }}" +{{- end }} +{{- if $metricDenyList }} + drop_metrics = {{ $metricDenyList | join "|" | quote }} +{{- end }} + scrape_interval = {{ .Values.kubelet.scrapeInterval | default .Values.global.scrapeInterval | quote }} + max_cache_size = {{ .Values.kubelet.maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if .Values.kubelet.extraMetricProcessingRules }} + forward_to = [prometheus.relabel.kubelet.receiver] +} + +prometheus.relabel "kubelet" { + max_cache_size = {{ .Values.kubelet.maxCacheSize | default .Values.global.maxCacheSize | int }} + + {{ .Values.kubelet.extraMetricProcessingRules | indent 2 }} + +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_module.alloy.tpl b/charts/feature-cluster-metrics/templates/_module.alloy.tpl new file mode 100644 index 000000000..0b75b253f --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_module.alloy.tpl @@ -0,0 +1,26 @@ +{{- define "feature.clusterMetrics.module" }} +declare "cluster_metrics" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } 
+ + {{- if or .Values.cadvisor.enabled .Values.kubelet.enabled (or .Values.apiServer.enabled (and .Values.controlPlane.enabled (not (eq .Values.apiServer.enabled false)))) }} + import.git "kubernetes" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/kubernetes/core/metrics.alloy" + pull_frequency = "15m" + } + {{- end }} + {{- include "feature.clusterMetrics.kubelet.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.cadvisor.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.apiServer.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.kubeControllerManager.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.kubeProxy.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.kubeScheduler.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.kube_state_metrics.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.node_exporter.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.windows_exporter.alloy" . | indent 2 }} + {{- include "feature.clusterMetrics.kepler.alloy" . 
| indent 2 }} +} +{{- end -}} \ No newline at end of file diff --git a/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl b/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl new file mode 100644 index 000000000..e7533b774 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_node_exporter.alloy.tpl @@ -0,0 +1,60 @@ +{{- define "feature.clusterMetrics.node_exporter.allowList" }} +{{ if (index .Values "node-exporter").metricsTuning.useDefaultAllowList }} +{{ "default-allow-lists/node-exporter.yaml" | .Files.Get }} +{{ end }} +{{ if (index .Values "node-exporter").metricsTuning.useIntegrationAllowList }} +{{ "default-allow-lists/node-exporter-integration.yaml" | .Files.Get }} +{{ end }} +{{ if (index .Values "node-exporter").metricsTuning.includeMetrics }} +{{ (index .Values "node-exporter").metricsTuning.includeMetrics | toYaml }} +{{ end }} +{{- end }} + +{{- define "feature.clusterMetrics.node_exporter.alloy" }} +{{- if (index .Values "node-exporter").enabled }} +{{- $metricAllowList := include "feature.clusterMetrics.node_exporter.allowList" . 
}} +{{- $metricDenyList := (index .Values "node-exporter").metricsTuning.excludeMetrics }} + +import.git "node_exporter" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/system/node-exporter/metrics.alloy" + pull_frequency = "15m" +} + +node_exporter.kubernetes "targets" { + label_selectors = [ +{{- range $label, $value := (index .Values "node-exporter").labelMatchers }} + {{ printf "%s=%s" $label $value | quote }}, +{{- end }} +{{- if (index .Values "node-exporter").deploy }} + {{ printf "release=%s" .Release.Name | quote }}, +{{- end }} + ] +} + +node_exporter.scrape "metrics" { + targets = node_exporter.kubernetes.targets.output + clustering = true +{{- if $metricAllowList }} + keep_metrics = "up|{{ $metricAllowList | fromYamlArray | join "|" }}" +{{- end }} +{{- if $metricDenyList }} + drop_metrics = {{ $metricDenyList | join "|" | quote }} +{{- end }} + scrape_interval = {{ (index .Values "node-exporter").scrapeInterval | default .Values.global.scrapeInterval | quote }} + max_cache_size = {{ (index .Values "node-exporter").maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if (index .Values "node-exporter").extraMetricProcessingRules }} + forward_to = [prometheus.relabel.node_exporter.receiver] +} + +prometheus.relabel "node_exporter" { + max_cache_size = {{ (index .Values "node-exporter").maxCacheSize | default .Values.global.maxCacheSize | int }} + + {{(index .Values "node-exporter").extraMetricProcessingRules}} + +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_notes.tpl b/charts/feature-cluster-metrics/templates/_notes.tpl new file mode 100644 index 000000000..d9dd4f326 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_notes.tpl @@ -0,0 +1,20 @@ +{{- define "feature.clusterMetrics.notes.deployments" }} +{{- if (index .Values "kube-state-metrics").deploy }} +* kube-state-metrics 
(Deployment) +{{- end }} +{{- if (index .Values "node-exporter").deploy }} +* Node Exporter (DaemonSet) +{{- end }} +{{- if (index .Values "windows-exporter").deploy }} +* Windows Exporter (DaemonSet) +{{- end }} +{{- if .Values.kepler.enabled }} +* Kepler (DaemonSet) +{{- end }} +{{- end }} + +{{- define "feature.clusterMetrics.notes.task" }} +Scrape Kubernetes Cluster metrics +{{- end }} + +{{- define "feature.clusterMetrics.notes.actions" }}{{- end }} diff --git a/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl b/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl new file mode 100644 index 000000000..d32787b62 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/_windows_exporter.alloy.tpl @@ -0,0 +1,60 @@ +{{- define "feature.clusterMetrics.windows_exporter.allowList" }} +{{ if (index .Values "windows-exporter").metricsTuning.useDefaultAllowList }} +{{ "default-allow-lists/windows-exporter.yaml" | .Files.Get }} +{{ end }} +{{ if (index .Values "windows-exporter").metricsTuning.useIntegrationAllowList }} +{{ "default-allow-lists/windows-exporter-integration.yaml" | .Files.Get }} +{{ end }} +{{ if (index .Values "windows-exporter").metricsTuning.includeMetrics }} +{{ (index .Values "windows-exporter").metricsTuning.includeMetrics | toYaml }} +{{ end }} +{{- end }} + +{{- define "feature.clusterMetrics.windows_exporter.alloy" }} +{{- if (index .Values "windows-exporter").enabled }} +{{- $metricAllowList := include "feature.clusterMetrics.windows_exporter.allowList" . 
}} +{{- $metricDenyList := (index .Values "windows-exporter").metricsTuning.excludeMetrics }} + +import.git "windows_exporter" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/system/node-exporter/metrics.alloy" + pull_frequency = "15m" +} + +windows_exporter.kubernetes "targets" { + label_selectors = [ +{{- range $label, $value := (index .Values "windows-exporter").labelMatchers }} + {{ printf "%s=%s" $label $value | quote }}, +{{- end }} +{{- if (index .Values "windows-exporter").deploy }} + {{ printf "release=%s" .Release.Name | quote }}, +{{- end }} + ] +} + +windows_exporter.scrape "metrics" { + targets = windows_exporter.kubernetes.targets.output + clustering = true +{{- if $metricAllowList }} + keep_metrics = "up|{{ $metricAllowList | fromYamlArray | join "|" }}" +{{- end }} +{{- if $metricDenyList }} + drop_metrics = {{ $metricDenyList | join "|" | quote }} +{{- end }} + scrape_interval = {{ (index .Values "windows-exporter").scrapeInterval | default .Values.global.scrapeInterval | quote }} + max_cache_size = {{ (index .Values "windows-exporter").maxCacheSize | default .Values.global.maxCacheSize | int }} +{{- if (index .Values "windows-exporter").extraMetricProcessingRules }} + forward_to = [prometheus.relabel.windows_exporter.receiver] +} + +prometheus.relabel "windows_exporter" { + max_cache_size = {{ (index .Values "windows-exporter").maxCacheSize | default .Values.global.maxCacheSize | int }} + + {{(index .Values "windows-exporter").extraMetricProcessingRules}} + +{{- end }} + forward_to = argument.metrics_destinations.value +} +{{- end }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/configmap.yaml b/charts/feature-cluster-metrics/templates/configmap.yaml new file mode 100644 index 000000000..702d3903d --- /dev/null +++ b/charts/feature-cluster-metrics/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.deployAsConfigMap }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + 
name: {{ include "feature.clusterMetrics.fullname" . }} + namespace: {{ .Release.Namespace }} +data: + module.alloy: |- + {{- include "feature.clusterMetrics.module" . | indent 4 }} +{{- end }} diff --git a/charts/feature-cluster-metrics/templates/platform_specific/openshift/kepler-scc.yaml b/charts/feature-cluster-metrics/templates/platform_specific/openshift/kepler-scc.yaml new file mode 100644 index 000000000..1b2f21611 --- /dev/null +++ b/charts/feature-cluster-metrics/templates/platform_specific/openshift/kepler-scc.yaml @@ -0,0 +1,66 @@ +{{- if and (eq .Values.global.platform "openshift") .Values.kepler.enabled }} +--- +apiVersion: security.openshift.io/v1 +kind: SecurityContextConstraints +metadata: + name: {{ include "kepler.fullname" .Subcharts.kepler }} +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: true +allowHostPID: false +allowHostPorts: true +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: [] +defaultAddCapabilities: null +defaultAllowPrivilegeEscalation: false +forbiddenSysctls: +- '*' +fsGroup: + type: RunAsAny +groups: [] +priority: null +readOnlyRootFilesystem: true +requiredDropCapabilities: null +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +seccompProfiles: + - runtime/default +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:{{ .Release.Namespace }}:{{ include "kepler.fullname" .Subcharts.kepler }} +volumes: +- configMap +- hostPath +- projected +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kepler.fullname" .Subcharts.kepler }}-scc +rules: +- verbs: + - use + apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - {{ include "kepler.fullname" .Subcharts.kepler }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kepler.fullname" .Subcharts.kepler }}-scc +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "kepler.fullname" .Subcharts.kepler }}-scc +subjects: +- kind: ServiceAccount + name: {{ include "kepler.fullname" .Subcharts.kepler }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/feature-cluster-metrics/tests/__snapshot__/.gitkeep b/charts/feature-cluster-metrics/tests/__snapshot__/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/charts/feature-cluster-metrics/tests/default_test.yaml b/charts/feature-cluster-metrics/tests/default_test.yaml new file mode 100644 index 000000000..749008233 --- /dev/null +++ b/charts/feature-cluster-metrics/tests/default_test.yaml @@ -0,0 +1,182 @@ +suite: Test default values +templates: + - configmap.yaml +tests: + - it: should render the default configuration + set: + deployAsConfigMap: true + asserts: + - isKind: + of: ConfigMap + - equal: + path: data["module.alloy"] + value: |- + declare "cluster_metrics" { + argument "metrics_destinations" { + comment = "Must be a list of metric destinations where collected metrics should be forwarded to" + } + import.git "kubernetes" { + repository = "https://github.com/grafana/alloy-modules.git" + revision = "main" + path = "modules/kubernetes/core/metrics.alloy" + pull_frequency = "15m" + } + + kubernetes.kubelet "scrape" { + clustering = true + keep_metrics = 
"up|container_cpu_usage_seconds_total|kubelet_certificate_manager_client_expiration_renew_errors|kubelet_certificate_manager_client_ttl_seconds|kubelet_certificate_manager_server_ttl_seconds|kubelet_cgroup_manager_duration_seconds_bucket|kubelet_cgroup_manager_duration_seconds_count|kubelet_node_config_error|kubelet_node_name|kubelet_pleg_relist_duration_seconds_bucket|kubelet_pleg_relist_duration_seconds_count|kubelet_pleg_relist_interval_seconds_bucket|kubelet_pod_start_duration_seconds_bucket|kubelet_pod_start_duration_seconds_count|kubelet_pod_worker_duration_seconds_bucket|kubelet_pod_worker_duration_seconds_count|kubelet_running_container_count|kubelet_running_containers|kubelet_running_pod_count|kubelet_running_pods|kubelet_runtime_operations_errors_total|kubelet_runtime_operations_total|kubelet_server_expiration_renew_errors|kubelet_volume_stats_available_bytes|kubelet_volume_stats_capacity_bytes|kubelet_volume_stats_inodes|kubelet_volume_stats_inodes_used|kubernetes_build_info|namespace_workload_pod|rest_client_requests_total|storage_operation_duration_seconds_count|storage_operation_errors_total|volume_manager_total_volumes" + scrape_interval = "60s" + max_cache_size = 100000 + forward_to = argument.metrics_destinations.value + } + + kubernetes.cadvisor "scrape" { + clustering = true + keep_metrics = "up|container_cpu_cfs_periods_total|container_cpu_cfs_throttled_periods_total|container_cpu_usage_seconds_total|container_fs_reads_bytes_total|container_fs_reads_total|container_fs_writes_bytes_total|container_fs_writes_total|container_memory_cache|container_memory_rss|container_memory_swap|container_memory_working_set_bytes|container_network_receive_bytes_total|container_network_receive_packets_dropped_total|container_network_receive_packets_total|container_network_transmit_bytes_total|container_network_transmit_packets_dropped_total|container_network_transmit_packets_total|machine_memory_bytes" + scrape_interval = "60s" + max_cache_size = 100000 + 
forward_to = [prometheus.relabel.cadvisor.receiver] + } + + prometheus.relabel "cadvisor" { + max_cache_size = 100000 + // Drop empty container labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","container"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*)@" + action = "drop" + } + // Drop empty image labels, addressing https://github.com/google/cadvisor/issues/2688 + rule { + source_labels = ["__name__","image"] + separator = "@" + regex = "(container_cpu_.*|container_fs_.*|container_memory_.*|container_network_.*)@" + action = "drop" + } + // Normalizing unimportant labels (not deleting to continue satisfying