From 8960c2c3b4ca2d669d50314238e8738f083f54ed Mon Sep 17 00:00:00 2001 From: Dixita Narang Date: Tue, 19 Sep 2023 00:47:39 +0000 Subject: [PATCH] KEP to add PSI metrics in Summary API, and for PSI based node actions Signed-off-by: Dixita Narang --- keps/prod-readiness/sig-node/4205.yaml | 6 + keps/sig-node/4205-psi-metric/README.md | 646 ++++++++++++++++++++++++ keps/sig-node/4205-psi-metric/kep.yaml | 47 ++ 3 files changed, 699 insertions(+) create mode 100644 keps/prod-readiness/sig-node/4205.yaml create mode 100644 keps/sig-node/4205-psi-metric/README.md create mode 100644 keps/sig-node/4205-psi-metric/kep.yaml diff --git a/keps/prod-readiness/sig-node/4205.yaml b/keps/prod-readiness/sig-node/4205.yaml new file mode 100644 index 000000000000..efd0ecac1904 --- /dev/null +++ b/keps/prod-readiness/sig-node/4205.yaml @@ -0,0 +1,6 @@ +# The KEP must have an approver from the +# "prod-readiness-approvers" group +# of http://git.k8s.io/enhancements/OWNERS_ALIASES +kep-number: 4205 +alpha: + approver: "@johnbelamaric" \ No newline at end of file diff --git a/keps/sig-node/4205-psi-metric/README.md b/keps/sig-node/4205-psi-metric/README.md new file mode 100644 index 000000000000..8aa72b521cfd --- /dev/null +++ b/keps/sig-node/4205-psi-metric/README.md @@ -0,0 +1,646 @@ +# KEP-4205: PSI Based Node Conditions + +- [Release Signoff Checklist](#release-signoff-checklist) +- [Summary](#summary) +- [Motivation](#motivation) + - [Goals](#goals) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) + - [User Stories (Optional)](#user-stories-optional) + - [Story 1](#story-1) + - [Story 2](#story-2) + - [Risks and Mitigations](#risks-and-mitigations) +- [Design Details](#design-details) + - [Phase 1](#phase-1) + - [CPU](#cpu) + - [Memory](#memory) + - [IO](#io) + - [Phase 2 to add PSI based actions.](#phase-2-to-add-psi-based-actions) + - [Test Plan](#test-plan) + - [Prerequisite testing updates](#prerequisite-testing-updates) + - [Unit tests](#unit-tests) + - [Integration 
tests](#integration-tests) + - [e2e tests](#e2e-tests) + - [Graduation Criteria](#graduation-criteria) + - [Phase 1: Alpha](#phase-1-alpha) + - [Phase 2: Alpha](#phase-2-alpha) + - [Beta](#beta) + - [GA](#ga) + - [Deprecation](#deprecation) + - [Upgrade / Downgrade Strategy](#upgrade--downgrade-strategy) + - [Version Skew Strategy](#version-skew-strategy) +- [Production Readiness Review Questionnaire](#production-readiness-review-questionnaire) + - [Feature Enablement and Rollback](#feature-enablement-and-rollback) + - [Rollout, Upgrade and Rollback Planning](#rollout-upgrade-and-rollback-planning) + - [Monitoring Requirements](#monitoring-requirements) + - [Dependencies](#dependencies) + - [Scalability](#scalability) + - [Troubleshooting](#troubleshooting) +- [Implementation History](#implementation-history) +- [Drawbacks](#drawbacks) +- [Infrastructure Needed (Optional)](#infrastructure-needed-optional) + + +## Release Signoff Checklist + + + +Items marked with (R) are required *prior to targeting to a milestone / release*. 
+ +- [ ] (R) Enhancement issue in release milestone, which links to KEP dir in [kubernetes/enhancements] (not the initial KEP PR) +- [ ] (R) KEP approvers have approved the KEP status as `implementable` +- [ ] (R) Design details are appropriately documented +- [ ] (R) Test plan is in place, giving consideration to SIG Architecture and SIG Testing input (including test refactors) + - [ ] e2e Tests for all Beta API Operations (endpoints) + - [ ] (R) Ensure GA e2e tests meet requirements for [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) + - [ ] (R) Minimum Two Week Window for GA e2e tests to prove flake free +- [ ] (R) Graduation criteria is in place + - [ ] (R) [all GA Endpoints](https://github.com/kubernetes/community/pull/1806) must be hit by [Conformance Tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md) +- [ ] (R) Production readiness review completed +- [ ] (R) Production readiness review approved +- [ ] "Implementation History" section is up-to-date for milestone +- [ ] User-facing documentation has been created in [kubernetes/website], for publication to [kubernetes.io] +- [ ] Supporting documentation—e.g., additional design documents, links to mailing list discussions/SIG meetings, relevant PRs/issues, release notes + + + +[kubernetes.io]: https://kubernetes.io/ +[kubernetes/enhancements]: https://git.k8s.io/enhancements +[kubernetes/kubernetes]: https://git.k8s.io/kubernetes +[kubernetes/website]: https://git.k8s.io/website + +## Summary + +This KEP proposes adding support in kubelet to read Pressure Stall Information (PSI) metric pertaining to CPU, Memory and IO resources exposed from cAdvisor and runc. This will enable kubelet to report node conditions which will be utilized to prevent scheduling of pods on nodes experiencing significant resource constraints. 
+ +## Motivation + +[PSI metric](https://www.kernel.org/doc/Documentation/accounting/psi.txt) provides a quantifiable way to see resource pressure increases as they develop, with a new pressure metric for three major resources (memory, CPU, IO). These pressure metrics are useful for detecting resource shortages and provide nodes the opportunity to respond intelligently - by updating the node condition. + +In short, PSI metrics are like barometers that provide fair warning of impending resource shortages on the node, and enable nodes to take more proactive, granular and nuanced steps when major resources (memory, CPU, IO) start becoming scarce. + +### Goals + +This proposal aims to: +1. Enable the kubelet to have the PSI metric of cgroupv2 exposed from cAdvisor and Runc. +2. Enable the pod level PSI metric and expose it in the Summary API. +3. Utilize the node level PSI metric to set node condition and node taints. + +It will have two phases: +Phase 1: includes goal 1, 2 +Phase 2: includes goal 3 + +### Non-Goals + +* Invest in more opportunities to further use PSI metric for pod evictions, +userspace OOM kills, and so on, for future KEPs. + +## Proposal + +### User Stories (Optional) + +#### Story 1 + +Today, to identify disruptions caused by resource crunches, Kubernetes users need to +install node exporter to read PSI metric. With the feature proposed in this enhancement, +PSI metric will be available for customers in the Kubernetes metrics API. + +#### Story 2 + +Kubernetes users want to prevent new pods from being scheduled on the nodes that have resource starvation. By using PSI metric, the kubelet will set Node Condition to avoid pods being scheduled on nodes under high resource pressure. The node controller could then set a [taint on the node based on these new Node Conditions](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-nodes-by-condition). 
+ +### Risks and Mitigations + +There are no significant risks associated with Phase 1 implementation that involves integrating +the PSI metric in kubelet from runc, and exposing these through the metric endpoint. + +Phase 2 involves utilizing the PSI metric to report node conditions. There is a potential +risk of early reporting for nodes under pressure. We intend to address this concern +by conducting careful experimentation with PSI threshold values to identify the optimal +default threshold to be used for reporting the nodes under heavy resource pressure. + +## Design Details + +#### Phase 1 +1. Add new Data structures PSIData and PSIStats corresponding to the PSI metric output format as following: +some avg10=0.00 avg60=0.00 avg300=0.00 total=0 +full avg10=0.00 avg60=0.00 avg300=0.00 total=0 + +```go +type PSIData struct { + Avg10 *float64 `json:"avg10"` + Avg60 *float64 `json:"avg60"` + Avg300 *float64 `json:"avg300"` + Total *float64 `json:"total"` +} + +type PSIStats struct { + Some *PSIData `json:"some,omitempty"` + Full *PSIData `json:"full,omitempty"` +} +``` + +2. Summary API includes stats for both system and kubepods level cgroups. Extend the Summary API to include PSI metric data for each resource obtained from cadvisor. +Note: if cadvisor-less is implemented prior to the implementation of this enhancement, the PSI +metric data will be available through CRI instead. + +##### CPU +```go +type CPUStats struct { + // PSI stats of the overall node + PSI cadvisorapi.PSIStats `json:"psi,omitempty"` +} +``` + +##### Memory +```go +type MemoryStats struct { + // PSI stats of the overall node + PSI cadvisorapi.PSIStats `json:"psi,omitempty"` +} +``` + +##### IO +```go +// IOStats contains data about IO usage. +type IOStats struct { + // The time at which these stats were updated. 
+ Time metav1.Time `json:"time"` + + // PSI stats of the overall node + PSI cadvisorapi.PSIStats `json:"psi,omitempty"` +} + +type NodeStats struct { + // Stats about the IO pressure of the node + IO *IOStats `json:"io,omitempty"` + +} +``` + +#### Phase 2 to add PSI based actions. +**Note:** These actions are tentative, and will depend on the outcome from testing +and discussions with sig-node members, customers, and other folks. + +1. Introduce a new kubelet config parameter, pressure threshold, to let users specify the pressure percentage beyond which the kubelet would report the node condition to disallow workloads to be scheduled on it. + +2. Add new node conditions corresponding to high PSI (beyond threshold levels) on CPU, Memory and IO. + +```go +// These are valid conditions of the node. Currently, we don't have enough information to decide +// node condition. +const ( +… + // Conditions based on pressure at system level cgroup. + NodeSystemCPUContentionPressure NodeConditionType = "SystemCPUContentionPressure" + NodeSystemMemoryContentionPressure NodeConditionType = "SystemMemoryContentionPressure" + NodeSystemDiskContentionPressure NodeConditionType = "SystemDiskContentionPressure" + + // Conditions based on pressure at kubepods level cgroup. + NodeKubepodsCPUContentionPressure NodeConditionType = "KubepodsCPUContentionPressure" + NodeKubepodsMemoryContentionPressure NodeConditionType = "KubepodsMemoryContentionPressure" + NodeKubepodsDiskContentionPressure NodeConditionType = "KubepodsDiskContentionPressure" +) +``` + +3. Kernel collects PSI data for 10s, 60s and 300s timeframes. To determine the optimal observation timeframe, it is necessary to conduct tests and benchmark performance. +In theory, a 10s interval might be too rapid to taint a node with NoSchedule effect. Therefore, as an initial approach, opting for a 60s timeframe for observation logic appears more appropriate. 
+ + Add the observation logic to add node condition and taint as per following scenarios: + * If avg60 >= threshold, then record an event indicating high resource pressure. + * If avg60 >= threshold and is trending higher i.e. avg10 >= threshold, then set Node Condition for high resource contention pressure. This should ensure no new pods are scheduled on the nodes under heavy resource contention pressure. + * If avg60 >= threshold for a node tainted with NoSchedule effect, and is trending lower i.e. avg10 <= threshold, record an event mentioning the resource contention pressure is trending lower. + * If avg60 < threshold for a node tainted with NoSchedule effect, remove the NodeCondition. + +4. Collaborate with sig-scheduling to modify TaintNodesByCondition feature to integrate new taints for the new Node Conditions introduced in this enhancement. +node.kubernetes.io/memory-contention-pressure=:NoSchedule +node.kubernetes.io/cpu-contention-pressure=:NoSchedule +node.kubernetes.io/disk-contention-pressure=:NoSchedule + +5. Perform experiments to finalize the default optimal pressure threshold value. + +6. Add a new feature gate PSINodeCondition, and guard the node condition related logic behind the feature gate. Set --feature-gates=PSINodeCondition=true to enable the feature. + +### Test Plan + + + +[X] I/we understand the owners of the involved components may require updates to +existing tests to make this code solid enough prior to committing the changes necessary +to implement this enhancement. + +##### Prerequisite testing updates + + + +##### Unit tests + + + + + +- ``: `` - `` + +##### Integration tests + + + + + +- : + +##### e2e tests + + + +- : + +### Graduation Criteria + +#### Phase 1: Alpha + +- PSI integrated in kubelet behind a feature flag. +- Initial e2e tests completed and enabled. + +#### Phase 2: Alpha + +- Implement Phase 2 of the enhancement which enables kubelet to +report node conditions based off PSI values. +- Additional tests coverage. 
+ +#### Beta + +- Feature gate is enabled by default. +- Add documentation for the feature. +- Extend e2e test coverage. +- Allowing time for feedback. + +#### GA +- TBD + +#### Deprecation + +- Announce deprecation and support policy of the existing flag +- Two versions passed since introducing the functionality that deprecates the flag (to address version skew) +- Address feedback on usage/changed behavior, provided on GitHub issues +- Deprecate the flag +--> + +### Upgrade / Downgrade Strategy + +NA + +### Version Skew Strategy + +NA + +## Production Readiness Review Questionnaire + + + +### Feature Enablement and Rollback + + + +###### How can this feature be enabled / disabled in a live cluster? + + + +- [ ] Feature gate (also fill in values in `kep.yaml`) + - Feature gate name: + - Components depending on the feature gate: +- [ ] Other + - Describe the mechanism: + - Will enabling / disabling the feature require downtime of the control + plane? + - Will enabling / disabling the feature require downtime or reprovisioning + of a node? + +###### Does enabling the feature change any default behavior? + + + +###### Can the feature be disabled once it has been enabled (i.e. can we roll back the enablement)? + + + +###### What happens if we reenable the feature if it was previously rolled back? + +###### Are there any tests for feature enablement/disablement? + + + +### Rollout, Upgrade and Rollback Planning + + + +###### How can a rollout or rollback fail? Can it impact already running workloads? + + + +###### What specific metrics should inform a rollback? + + + +###### Were upgrade and rollback tested? Was the upgrade->downgrade->upgrade path tested? + + + +###### Is the rollout accompanied by any deprecations and/or removals of features, APIs, fields of API types, flags, etc.? + + + +### Monitoring Requirements + + + +###### How can an operator determine if the feature is in use by workloads? 
+ + + +###### How can someone using this feature know that it is working for their instance? + + + +- [ ] Events + - Event Reason: +- [ ] API .status + - Condition name: + - Other field: +- [ ] Other (treat as last resort) + - Details: + +###### What are the reasonable SLOs (Service Level Objectives) for the enhancement? + + + +###### What are the SLIs (Service Level Indicators) an operator can use to determine the health of the service? + + + +- [ ] Metrics + - Metric name: + - [Optional] Aggregation method: + - Components exposing the metric: +- [ ] Other (treat as last resort) + - Details: + +###### Are there any missing metrics that would be useful to have to improve observability of this feature? + + + +### Dependencies + + + +###### Does this feature depend on any specific services running in the cluster? + + +No + +### Scalability + + + +###### Will enabling / using this feature result in any new API calls? + +No + +###### Will enabling / using this feature result in introducing new API types? + +No + +###### Will enabling / using this feature result in any new calls to the cloud provider? + +No + +###### Will enabling / using this feature result in increasing size or count of the existing API objects? + +No + +###### Will enabling / using this feature result in increasing time taken by any operations covered by existing SLIs/SLOs? + +No + +###### Will enabling / using this feature result in non-negligible increase of resource usage (CPU, RAM, disk, IO, ...) in any components? + +No. Additional metric i.e. PSI is being read from cadvisor. + +###### Can enabling / using this feature result in resource exhaustion of some node resources (PIDs, sockets, inodes, etc.)? + +No + +### Troubleshooting + +NA + +###### How does this feature react if the API server and/or etcd is unavailable? + +###### What are other known failure modes? + +NA + +###### What steps should be taken if SLOs are not being met to determine the problem? 
+ +## Implementation History + +- 2023/09/13: Initial proposal + +## Drawbacks + +No drawbacks in Phase 1 identified. There's no reason the enhancement should not be +implemented. This enhancement now makes it possible to read PSI metric without installing +additional dependencies + +## Infrastructure Needed (Optional) + +No new infrastructure is needed. \ No newline at end of file diff --git a/keps/sig-node/4205-psi-metric/kep.yaml b/keps/sig-node/4205-psi-metric/kep.yaml new file mode 100644 index 000000000000..86635fdd56aa --- /dev/null +++ b/keps/sig-node/4205-psi-metric/kep.yaml @@ -0,0 +1,47 @@ +title: PSI based Node Conditions +kep-number: 4205 +authors: + - "@ndixita" + - "@dragoncell" +owning-sig: sig-node +participating-sigs: + - sig-node +status: implementable +creation-date: 2023-05-25 +reviewers: + - "@mrunalp" + - "@haircommander" + - "@porterdavid" + - "@rphillips" + +approvers: + - "@sig-node-leads" + +see-also: [] +replaces: [] + +# The target maturity stage in the current dev cycle for this KEP. +stage: alpha + +# The most recent milestone for which work toward delivery of this KEP has been +# done. This can be the current (upcoming) milestone, if it is being actively +# worked on. +latest-milestone: "v1.29" + +# The milestone at which this feature was, or is targeted to be, at each stage. +milestone: + alpha: "v1.29" + +# The following PRR answers are required at alpha release +# List the feature gate name and the components for which it must be enabled +feature-gates: + - name: PSINodeCondition + components: + - kubelet + - kube-controller-manager + - kube-scheduler + +disable-supported: true + +# The following PRR answers are required at beta release +metrics: []