feat: statefulset support
maxime1907 authored and machine424 committed Feb 11, 2024
1 parent 8bddc0d commit 67fc3fd
Showing 6 changed files with 111 additions and 30 deletions.
4 changes: 2 additions & 2 deletions helm-chart/kube-hpa-scale-to-zero/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
name: kube-hpa-scale-to-zero
description: See https://github.com/machine424/kube-hpa-scale-to-zero
type: application
version: 0.3.0
appVersion: "0.3.0"
version: 0.4.0
appVersion: "0.4.0"
3 changes: 3 additions & 0 deletions helm-chart/kube-hpa-scale-to-zero/templates/rbac.yaml
@@ -10,6 +10,9 @@ rules:
- apiGroups: ["apps"]
resources: ["deployments/scale"]
verbs: ["get", "patch"]
- apiGroups: ["apps"]
resources: ["statefulsets/scale"]
verbs: ["get", "patch"]
- apiGroups: ["autoscaling"]
resources: ["horizontalpodautoscalers"]
verbs: ["get", "list", "watch"]
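The added rule grants get and patch on the statefulsets/scale subresource, which the controller needs before it can read or update a StatefulSet's replica count. A minimal sketch of verifying that permission with the Kubernetes Python client; the in-cluster config call and the default namespace are assumptions for illustration, not part of this commit:

    # Hypothetical permission check, not part of this commit: ask the API server
    # whether the current service account may patch statefulsets/scale.
    import kubernetes

    kubernetes.config.load_incluster_config()  # assumed: running inside the cluster
    auth = kubernetes.client.AuthorizationV1Api()

    review = kubernetes.client.V1SelfSubjectAccessReview(
        spec=kubernetes.client.V1SelfSubjectAccessReviewSpec(
            resource_attributes=kubernetes.client.V1ResourceAttributes(
                group="apps",
                resource="statefulsets",
                subresource="scale",
                verb="patch",
                namespace="default",  # assumed namespace
            )
        )
    )
    result = auth.create_self_subject_access_review(body=review)
    print("patch statefulsets/scale allowed:", result.status.allowed)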
26 changes: 25 additions & 1 deletion main.py
@@ -150,8 +150,14 @@ def update_target(hpa: HPA) -> None:
name=hpa.target_name,
needed_replicas=needed_replicas,
)
case "StatefulSet":
scale_statefulset(
namespace=hpa.namespace,
name=hpa.target_name,
needed_replicas=needed_replicas,
)
case _:
raise ValueError("Only support Deployment as HPA target for now.")
raise ValueError(f"Target kind {hpa.target_kind} not supported.")


def scaling_is_needed(*, current_replicas, needed_replicas) -> bool:
@@ -180,6 +186,24 @@ def scale_deployment(*, namespace, name, needed_replicas) -> None:
LOGGER.warning(f"Deployment {namespace}/{name} was not found.")


def scale_statefulset(*, namespace, name, needed_replicas) -> None:
try:
scale = APP_V1.read_namespaced_stateful_set_scale(namespace=namespace, name=name)
current_replicas = scale.status.replicas
if not scaling_is_needed(current_replicas=current_replicas, needed_replicas=needed_replicas):
LOGGER.info(f"No need to scale statefulset {namespace}/{name} {current_replicas=} {needed_replicas=}.")
return

scale.spec.replicas = needed_replicas
        # Maybe do not scale immediately? But we don't want to reimplement an HPA.
APP_V1.patch_namespaced_stateful_set_scale(namespace=namespace, name=name, body=scale)
LOGGER.info(f"StatefulSet {namespace}/{name} was scaled {current_replicas=}->{needed_replicas=}.")
except kubernetes.client.exceptions.ApiException as exc:
if exc.status != 404:
raise exc
LOGGER.warning(f"StatefulSet {namespace}/{name} was not found.")


def parse_cli_args():
parser = argparse.ArgumentParser(
description="kube-hpa-scale-to-zero. Check https://github.com/machine424/kube-hpa-scale-to-zero"
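scale_statefulset mirrors scale_deployment: it reads the scale subresource, skips the call when the current and needed replica counts already match, otherwise patches spec.replicas, and logs a warning instead of failing if the StatefulSet is gone. A standalone sketch of the same read/patch round-trip with the Kubernetes Python client; the kubeconfig loading, namespace, and StatefulSet name are placeholders, not part of this commit:

    # Illustrative only: read and patch a StatefulSet's scale subresource the way
    # scale_statefulset does, but as a standalone script with assumed names.
    import kubernetes

    kubernetes.config.load_kube_config()  # assumed: running outside the cluster
    apps = kubernetes.client.AppsV1Api()

    scale = apps.read_namespaced_stateful_set_scale(name="target-2", namespace="default")
    print("current replicas:", scale.status.replicas)

    scale.spec.replicas = 0  # e.g. scale to zero while the metric is at 0
    apps.patch_namespaced_stateful_set_scale(name="target-2", namespace="default", body=scale)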
40 changes: 15 additions & 25 deletions tests/e2e_test.py
@@ -43,6 +43,7 @@ def setup():
"prometheus-community/prometheus",
"--values",
f"{MANIFESTS_PATH}/prometheus-values.yaml",
"--wait",
]
)
run(
@@ -57,32 +58,20 @@ def setup():
]
)

run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml"])
run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml", "--wait=true"])
yield
finally:
run(
command=[
"helm",
"delete",
"prometheus",
]
)
run(
command=[
"helm",
"delete",
"prometheus-adapter",
]
)
run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml"])
run(command=["helm", "delete", "prometheus", "--wait"])
run(command=["helm", "delete", "prometheus-adapter", "--wait"])
run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml", "--wait=true"])


def deploy_target(manifest: str):
run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/{manifest}"])
run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/{manifest}", "--wait=true"])


def delete_target(manifest: str):
run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/{manifest}"])
run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/{manifest}", "--wait=true"])


def run_scaler():
@@ -103,38 +92,39 @@ def set_foo_metric_value(value: int):
run(command=["kubectl", "rollout", "status", "deployment", "metrics-generator"])


def wait_deployment_scale(*, name: str, replicas: int):
def wait_scale(*, kind: str, name: str, replicas: int):
run(
command=[
"kubectl",
"wait",
f"--for=jsonpath={{.spec.replicas}}={replicas}",
"deployment",
kind,
name,
f"--timeout={TIMEOUT}s",
]
)


def test_target_1(setup):
target_name = "target-1"
@pytest.mark.parametrize("target_name, kind", [("target-1", "deployment"), ("target-2", "statefulset")])
def test_target(setup, target_name: str, kind: str):
set_foo_metric_value(0)

deploy_target(f"{target_name}.yaml")

# The initial replicas count is 1
wait_deployment_scale(name=target_name, replicas=1)
wait_scale(kind=kind, name=target_name, replicas=1)

khstz = run_scaler()

try:
# The initial metric value is 0, it should scale the target to 0
wait_deployment_scale(name=target_name, replicas=0)
wait_scale(kind=kind, name=target_name, replicas=0)

# Increase the metric value
set_foo_metric_value(10)

# The target was revived and the HPA was able to scale it up
wait_deployment_scale(name=target_name, replicas=3)
wait_scale(kind=kind, name=target_name, replicas=3)
finally:
khstz.kill()
delete_target(f"{target_name}.yaml")
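With wait_scale taking the target kind, the same parametrized test now drives both the Deployment and the StatefulSet through kubectl wait on .spec.replicas. An equivalent poll through the API, sketched here as a hypothetical helper that is not part of the test suite; the namespace, timeout, and kubeconfig loading are assumptions:

    # Hypothetical alternative to shelling out to `kubectl wait`: poll the scale
    # subresource until .spec.replicas reaches the expected value.
    import time

    import kubernetes

    kubernetes.config.load_kube_config()
    apps = kubernetes.client.AppsV1Api()

    def wait_scale_api(*, kind: str, name: str, replicas: int, namespace: str = "default", timeout: int = 60) -> None:
        read = {
            "deployment": apps.read_namespaced_deployment_scale,
            "statefulset": apps.read_namespaced_stateful_set_scale,
        }[kind]
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if read(name=name, namespace=namespace).spec.replicas == replicas:
                return
            time.sleep(1)
        raise TimeoutError(f"{kind} {namespace}/{name} did not reach {replicas} replicas in {timeout}s")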
4 changes: 2 additions & 2 deletions tests/manifests/target-1.yaml
@@ -41,5 +41,5 @@ spec:
spec:
terminationGracePeriodSeconds: 1
containers:
- name: nginx
image: nginx:latest
- name: nginx
image: nginx:stable-alpine-slim
64 changes: 64 additions & 0 deletions tests/manifests/target-2.yaml
@@ -0,0 +1,64 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: target-2
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: StatefulSet
name: target-2
minReplicas: 1
maxReplicas: 3
metrics:
- type: Object
object:
metric:
name: foo_metric
describedObject:
apiVersion: "/v1"
kind: Service
name: metrics-generator
target:
type: Value
value: 1

---

apiVersion: v1
kind: Service
metadata:
name: target-2
labels:
app: target-2
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: target-2

---

apiVersion: apps/v1
kind: StatefulSet
metadata:
name: target-2
spec:
replicas: 1
selector:
matchLabels:
app: target-2
serviceName: target-2
template:
metadata:
labels:
app: target-2
spec:
terminationGracePeriodSeconds: 1
containers:
- name: nginx
image: nginx:stable-alpine-slim
ports:
- containerPort: 80
name: web
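
target-2 mirrors target-1 but as a StatefulSet with the headless Service it requires; the HPA scales it between 1 and 3 replicas based on foo_metric, exposed by the metrics-generator Service through prometheus-adapter. A short sketch of inspecting the HPA's view of that target with the Python client; the namespace and kubeconfig loading are assumptions:

    # Illustrative only: read the target-2 HPA and print its current and desired
    # replica counts as it reacts to foo_metric.
    import kubernetes

    kubernetes.config.load_kube_config()
    autoscaling = kubernetes.client.AutoscalingV2Api()

    hpa = autoscaling.read_namespaced_horizontal_pod_autoscaler(name="target-2", namespace="default")
    print("current:", hpa.status.current_replicas, "desired:", hpa.status.desired_replicas)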
