daskhub:
# Pangeo configuration values
# --------------------
# The following configuration options are specifically for the custom Pangeo components.
# Some values will be inherited into the dependent chart configs. See requirements.yaml for info.
# Create and use roles and service accounts on an RBAC enabled cluster.
rbac:
enabled: true
# Dependency configuration values
# -------------------------------
# To configure dependencies you must create a key with the name of the dependency
# and then configure as per the dependent chart's values.yaml. Don't forget to indent!
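  # For example, a sketch of overriding a single value from the jupyterhub
  # dependency (illustrative key, not set by this chart):
  #
  #   jupyterhub:
  #     proxy:
  #       service:
  #         type: LoadBalancer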
jupyterhub:
# Helm config for jupyterhub goes here
# See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/jupyterhub/values.yaml
singleuser:
cpu:
limit: 2
guarantee: 1
memory:
limit: 4G
guarantee: 2G
extraEnv:
# The default worker image matches the singleuser image.
DASK_GATEWAY__CLUSTER__OPTIONS__IMAGE: '{JUPYTER_IMAGE_SPEC}'
DASK_DISTRIBUTED__DASHBOARD_LINK: '/user/{JUPYTERHUB_USER}/proxy/{port}/status'
DASK_LABEXTENSION__FACTORY__MODULE: 'dask_gateway'
DASK_LABEXTENSION__FACTORY__CLASS: 'GatewayCluster'
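        # Dask reads any DASK_-prefixed environment variable as configuration,
        # with "__" separating nesting levels; e.g. the first entry above maps
        # to the gateway.cluster.options.image setting. dask-gateway expands
        # the '{JUPYTER_IMAGE_SPEC}' template from the user's environment, so
        # worker pods default to the same image as the notebook server.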
serviceAccountName: pangeo
prePuller:
hook:
enabled: false
scheduling:
userScheduler:
enabled: true
podPriority:
enabled: true
userPlaceholder:
enabled: false
hub:
service:
annotations:
prometheus.io/scrape: 'true'
prometheus.io/path: '/hub/metrics'
extraConfig:
prometheus: |
c.JupyterHub.authenticate_prometheus = False
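          # With authentication disabled, a Prometheus server that honors the
          # scrape annotations above can read /hub/metrics without a token.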
networkPolicy:
        # Disable the hub network policy so that the dask-gateway API server
        # can reach the hub directly. Only applies to JupyterHub >= 0.10 and
        # can be removed for dask-gateway > 0.9.0.
# https://github.com/dask/helm-chart/issues/142
enabled: false
dask-gateway:
gateway:
backend:
scheduler:
extraPodConfig:
serviceAccountName: pangeo
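            # The dedicated-node taint appears twice below, once with "/" and
            # once with "_", because some managed Kubernetes offerings do not
            # accept "/" in taint keys; the underscore spelling is the
            # fallback convention. The worker tolerations further down repeat
            # the same pair.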
tolerations:
- key: "k8s.dask.org/dedicated"
operator: "Equal"
value: "scheduler"
effect: "NoSchedule"
- key: "k8s.dask.org_dedicated"
operator: "Equal"
value: "scheduler"
effect: "NoSchedule"
worker:
extraContainerConfig:
securityContext:
runAsGroup: 1000
runAsUser: 1000
extraPodConfig:
serviceAccountName: pangeo
automountServiceAccountToken: true
securityContext:
fsGroup: 1000
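            # Running as uid/gid 1000 assumes the cluster image uses the
            # default notebook user (e.g. jovyan in the pangeo images), so
            # worker files share ownership with the notebook session.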
tolerations:
- key: "k8s.dask.org/dedicated"
operator: "Equal"
value: "worker"
effect: "NoSchedule"
- key: "k8s.dask.org_dedicated"
operator: "Equal"
value: "worker"
effect: "NoSchedule"
# TODO: figure out a replacement for userLimits.
extraConfig:
optionHandler: |
from dask_gateway_server.options import Options, Float, String, Mapping
def cluster_options(user):
def option_handler(options):
if ":" not in options.image:
raise ValueError("When specifying an image you must also provide a tag")
extra_annotations = {
"hub.jupyter.org/username": user.name,
"prometheus.io/scrape": "true",
"prometheus.io/port": "8787",
}
extra_labels = {
"hub.jupyter.org/username": user.name,
}
                  # We scale the requests down (cores by 0.88, memory by 0.95)
                  # to ensure that they pack well onto nodes. Kubernetes
                  # reserves a small fraction of the memory / CPU for itself,
                  # so the common situation of a node with 4 cores and a user
                  # requesting 4 cores means we request just over half of the
                  # *allocatable* CPU, and so we can't pack more than 1 worker
                  # on that node.
                  # On GCP, the kubernetes requests are ~12% of the CPU.
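                  # For example, with worker_cores=4 and worker_memory=8 this
                  # handler requests 0.88 cores (limit 4) and 7.6G of memory
                  # (limit 8G).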
return {
"worker_cores": 0.88 * min(options.worker_cores / 2, 1),
"worker_cores_limit": options.worker_cores,
"worker_memory": "%fG" % (0.95 * options.worker_memory),
"worker_memory_limit": "%fG" % options.worker_memory,
"image": options.image,
"scheduler_extra_pod_annotations": extra_annotations,
"worker_extra_pod_annotations": extra_annotations,
"scheduler_extra_pod_labels": extra_labels,
"worker_extra_pod_labels": extra_labels,
"environment": options.environment,
}
return Options(
Float("worker_cores", 2, min=1, max=16, label="Worker Cores"),
Float("worker_memory", 8, min=1, max=32, label="Worker Memory (GiB)"),
String("image", default="pangeo/pangeo-notebook:latest", label="Image"),
Mapping("environment", {}, label="Environment Variables"),
handler=option_handler,
)
c.Backend.cluster_options = cluster_options
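          # Client-side sketch (not part of this config): a user could then
          # pick these options with the dask-gateway client, e.g.
          #   from dask_gateway import Gateway
          #   gateway = Gateway()
          #   options = gateway.cluster_options()
          #   options.worker_cores = 4
          #   cluster = gateway.new_cluster(options)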
idle: |
# timeout after 30 minutes of inactivity
c.KubeClusterConfig.idle_timeout = 1800
limits: |
# per Dask cluster limits.
c.ClusterConfig.cluster_max_cores = 100
c.ClusterConfig.cluster_max_memory = "600G"
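          # With the default 2-core / 8 GiB workers defined above, these caps
          # allow at most 50 workers by cores (100 / 2) and 75 by memory
          # (600 / 8); whichever binds first applies.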
homeDirectories:
nfs:
enabled: false