# common.values.yaml
basehub:
nfs:
enabled: true
volumeReporter:
enabled: false
pv:
enabled: true
# from https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-nfs-mount-settings.html
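      # Roughly, per the AWS doc above: rsize/wsize use 1 MiB read/write buffers,
      # timeo=600 (deciseconds) waits 60s before retrying, retrans=2 limits retries
      # before a soft error, and noresvport lets the client pick a new source port
      # on reconnection, which AWS recommends for EFS.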
mountOptions:
- rsize=1048576
- wsize=1048576
- timeo=600
- soft # We pick soft over hard, so NFS lockups don't lead to hung processes
- retrans=2
- noresvport
serverIP: fs-08b7410bc122c9d70.efs.us-west-2.amazonaws.com
baseShareName: /
dask-gateway:
enabled: true
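    # Deploys dask-gateway alongside the hub so users can launch Dask clusters on
    # demand; the daskhubSetup flag below presumably wires the hub up to it.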
jupyterhub:
custom:
daskhubSetup:
enabled: true
2i2c:
add_staff_user_ids_to_admin_users: true
add_staff_user_ids_of_type: "github"
jupyterhubConfigurator:
enabled: false
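        # The configurator UI (for picking a default image/interface from the admin
        # panel) is turned off here, presumably because the profileList below defines
        # images and options explicitly.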
homepage:
templateVars:
org:
name: "The Visualization, Exploration, and Data Analysis (VEDA) Project"
logo_url: https://visex.netlify.app/graphics/nasa-veda-logo-pos.svg
url: https://www.earthdata.nasa.gov/esds/veda
designed_by:
name: "2i2c"
url: https://2i2c.org
operated_by:
name: "2i2c"
url: https://2i2c.org
funded_by:
name: "NASA"
url: https://www.earthdata.nasa.gov/esds
hub:
allowNamedServers: true
config:
JupyterHub:
authenticator_class: github
GitHubOAuthenticator:
populate_teams_in_auth_state: true
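          # Entries below use OAuthenticator's "<org>:<team>" form, so membership in
          # the named GitHub team (not just the org) is required; checking teams needs
          # the read:org scope requested further down.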
allowed_organizations:
- CASI-LIS-Dashboard:dev-veda-jupyterhub
- veda-analytics-access:all-users
- veda-analytics-access:collaborator-access
- CYGNSS-VEDA:cygnss-iwg
- veda-analytics-access:maap-biomass-team
- Earth-Information-System:eis-fire
- Earth-Information-System:swot
- veda-analytics-access:harvard-data-team
scope:
- read:org
Authenticator:
enable_auth_state: true
admin_users:
- abarciauskas-bgse
- freitagb
- j08lue
- rezuma
- ranchodeluxe
- jsignell
- slesaad
- wildintellect
- amarouane-ABDELHAK
singleuser:
cloudMetadata:
blockWithIptables: false
defaultUrl: /lab
extraEnv:
GH_SCOPED_CREDS_CLIENT_ID: "Iv23liG9LZ45xmB20syA"
        GH_SCOPED_CREDS_APP_URL: https://github.com/apps/veda-hub-github-scoped-creds
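        # These configure the gh-scoped-creds helper in user pods, so users can grant
        # the named GitHub App temporary, repo-scoped push access instead of storing
        # long-lived tokens.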
initContainers:
- &volume_ownership_fix_initcontainer
name: volume-mount-ownership-fix
image: busybox:1.36.1
command:
- sh
- -c
- id && chown 1000:1000 /home/jovyan /home/jovyan/shared /home/jovyan/shared-public && ls -lhd /home/jovyan
securityContext:
runAsUser: 0
volumeMounts:
- name: home
mountPath: /home/jovyan
subPath: "{username}"
# Mounted without readonly attribute here,
# so we can chown it appropriately
- name: home
mountPath: /home/jovyan/shared
subPath: _shared
- name: home
mountPath: /home/jovyan/shared-public
subPath: _shared-public
storage:
extraVolumeMounts:
- name: home
mountPath: /home/jovyan/shared-public
subPath: _shared-public
readOnly: false
- name: home
mountPath: /home/rstudio/shared-public
subPath: _shared-public
readOnly: false
- name: home
mountPath: /home/jovyan/shared
subPath: _shared
readOnly: true
- name: dev-shm
mountPath: /dev/shm
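          # Mounting a memory-backed volume at /dev/shm works around the small default
          # shared-memory segment in containers, which matters for multiprocessing-heavy
          # workloads; the dev-shm volume itself is expected to be defined (e.g. as an
          # emptyDir with medium: Memory) in extraVolumes elsewhere in this config.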
profileList:
- display_name: Choose your environment and resources
default: true
profile_options:
image:
display_name: Environment
unlisted_choice:
                enabled: true
display_name: "Custom image"
validation_regex: "^.+:.+$"
                validation_message: "Must be a publicly available docker image, of the form <image-name>:<tag>"
kubespawner_override:
image: "{value}"
choices:
01-modify-pangeo:
display_name: Modified Pangeo Notebook
                  description: Pangeo-based notebook with a Python environment
kubespawner_override:
image: public.ecr.aws/nasa-veda/pangeo-notebook-veda-image:2024.08.18-v1
init_containers:
                      # Need to explicitly fix ownership here, as otherwise these directories will be owned
                      # by root on most NFS filesystems - neither EFS nor Google Filestore supports anonuid
- *volume_ownership_fix_initcontainer
                      # This container uses nbgitpuller to pull https://github.com/NASA-IMPACT/veda-docs/ into each user's home directory
                      # image source: https://github.com/NASA-IMPACT/jupyterhub-gitpuller-init
- name: jupyterhub-gitpuller-init
image: public.ecr.aws/nasa-veda/jupyterhub-gitpuller-init:97eb45f9d23b128aff810e45911857d5cffd05c2
env:
- name: TARGET_PATH
value: veda-docs
- name: SOURCE_REPO
value: "https://github.com/NASA-IMPACT/veda-docs"
volumeMounts:
- name: home
mountPath: /home/jovyan
subPath: "{username}"
securityContext:
runAsUser: 1000
runAsGroup: 1000
02-rocker:
display_name: Rocker Geospatial with RStudio
description: R environment with many geospatial libraries pre-installed
kubespawner_override:
image: rocker/binder:4.3
image_pull_policy: Always
# Launch RStudio after the user logs in
default_url: /rstudio
                    # Ensures the container's working directory is the home directory
                    # https://github.com/2i2c-org/infrastructure/issues/2559
working_dir: /home/rstudio
03-qgis:
display_name: QGIS on Linux Desktop
                  description: Linux desktop in the browser, with QGIS installed
kubespawner_override:
# Launch people directly into the Linux desktop when they start
default_url: /desktop
                    # Built from https://github.com/2i2c-org/nasa-qgis-image
image: quay.io/2i2c/nasa-qgis-image:d76118ea0c15
resource_allocation:
display_name: Resource Allocation
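              # The choices below appear to be derived from node allocatable capacity:
              # mem_guarantee equals mem_limit at roughly 1/16, 1/8, 1/4, 1/2, or all of
              # an r5.xlarge (~29.7 GB) or r5.4xlarge (~121.2 GB), so a whole number of
              # users packs onto each node; cpu_guarantee scales with that fraction while
              # cpu_limit stays at the node's full allocatable CPU.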
choices:
mem_1_9:
                  display_name: 1.9 GB RAM, up to 3.7 CPUs
kubespawner_override:
mem_guarantee: 1991244775
mem_limit: 1991244775
cpu_guarantee: 0.2328125
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
default: true
mem_3_7:
                  display_name: 3.7 GB RAM, up to 3.7 CPUs
kubespawner_override:
mem_guarantee: 3982489550
mem_limit: 3982489550
cpu_guarantee: 0.465625
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_7_4:
                  display_name: 7.4 GB RAM, up to 3.7 CPUs
kubespawner_override:
mem_guarantee: 7964979101
mem_limit: 7964979101
cpu_guarantee: 0.93125
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_14_8:
                  display_name: 14.8 GB RAM, up to 3.7 CPUs
kubespawner_override:
mem_guarantee: 15929958203
mem_limit: 15929958203
cpu_guarantee: 1.8625
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_29_7:
                  display_name: 29.7 GB RAM, up to 3.7 CPUs
kubespawner_override:
mem_guarantee: 31859916406
mem_limit: 31859916406
cpu_guarantee: 3.725
cpu_limit: 3.725
node_selector:
node.kubernetes.io/instance-type: r5.xlarge
mem_60_6:
                  display_name: 60.6 GB RAM, up to 15.6 CPUs
kubespawner_override:
mem_guarantee: 65094448840
mem_limit: 65094448840
cpu_guarantee: 7.8475
cpu_limit: 15.695
node_selector:
node.kubernetes.io/instance-type: r5.4xlarge
mem_121_2:
                  display_name: 121.2 GB RAM, up to 15.6 CPUs
kubespawner_override:
mem_guarantee: 130188897681
mem_limit: 130188897681
cpu_guarantee: 15.695
cpu_limit: 15.695
node_selector:
node.kubernetes.io/instance-type: r5.4xlarge
- display_name: NVIDIA Tesla T4, ~16 GB, ~4 CPUs
description: "Start a container on a dedicated node with a GPU"
slug: "gpu"
allowed_groups:
- veda-analytics-access:gpu
- 2i2c-org:hub-access-for-2i2c-staff
- veda-analytics-access:harvard-data-team
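          # Only members of these GitHub teams see this profile; team membership comes
          # from the auth state populated by populate_teams_in_auth_state above.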
profile_options:
image:
display_name: Environment
unlisted_choice:
enabled: true
display_name: "Custom image"
validation_regex: "^.+:.+$"
                validation_message: "Must be a publicly available docker image of the form <image-name>:<tag>"
kubespawner_override:
image: "{value}"
choices:
pytorch:
display_name: Pangeo PyTorch ML Notebook
default: false
slug: "pytorch"
kubespawner_override:
image: "quay.io/pangeo/pytorch-notebook:2024.11.11"
tensorflow2:
display_name: Pangeo Tensorflow2 ML Notebook
default: true
slug: "tensorflow2"
kubespawner_override:
image: "quay.io/pangeo/ml-notebook:2024.11.11"
kubespawner_override:
environment:
NVIDIA_DRIVER_CAPABILITIES: compute,utility
mem_limit: null
mem_guarantee: 14G
node_selector:
node.kubernetes.io/instance-type: g4dn.xlarge
extra_resource_limits:
nvidia.com/gpu: "1"
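            # Requests a single NVIDIA GPU and pins the pod to the g4dn.xlarge pool;
            # mem_limit is unset so the lone user can use the node's memory, and
            # NVIDIA_DRIVER_CAPABILITIES limits the injected driver libraries to
            # compute and utility.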
scheduling:
userScheduler:
enabled: true
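        # The z2jh user scheduler packs user pods onto the most-utilized nodes so
        # lightly used nodes can drain and scale down.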