-
Notifications
You must be signed in to change notification settings - Fork 65
/
home-space-reporter.yaml
76 lines (76 loc) · 2.82 KB
/
home-space-reporter.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
{{- if .Values.nfs.enabled }}
{{- if .Values.nfs.volumeReporter.enabled }}
# To provide data for the jupyterhub/grafana-dashboards dashboard about free
# space in the shared volume, which contains users home folders etc, we deploy
# prometheus node-exporter to collect this data for prometheus server to scrape.
#
# This prometheus metrics exporter will when used against a shared EFS storage
# (AWS NFS service) report an extreme amount of available disk space, making the
# available space remaining 100%. This makes the jupyterhub/grafana-dashboards
# graph reporting remaining available disk space in % quite pointless. Due to
# that, it can make sense to disable this if EFS is used to provide storage as
# it becomes another pod that eats up the available pods per node.
#
# This is based on the Deployment manifest in jupyterhub/grafana-dashboards'
# readme: https://github.com/jupyterhub/grafana-dashboards#additional-collectors
#
apiVersion: apps/v1
kind: Deployment
metadata:
  name: shared-volume-metrics
  labels:
    app: jupyterhub
    component: shared-volume-metrics
spec:
  # A single replica suffices: the exporter only reads filesystem stats from
  # the one shared volume, so extra replicas would report duplicate data.
  replicas: 1
  selector:
    matchLabels:
      app: jupyterhub
      component: shared-volume-metrics
  template:
    metadata:
      annotations:
        # This enables prometheus to actually scrape metrics from here
        prometheus.io/scrape: "true"
        prometheus.io/port: "9100"
      labels:
        app: jupyterhub
        # The component label below should match a grafana dashboard definition
        # in jupyterhub/grafana-dashboards, do not change it!
        component: shared-volume-metrics
    spec:
      containers:
        - name: shared-volume-exporter
          image: quay.io/prometheus/node-exporter:v1.5.0
          args:
            # We only want filesystem stats
            - --collector.disable-defaults
            - --collector.filesystem
            - --web.listen-address=:9100
          ports:
            - containerPort: 9100
              name: metrics
              protocol: TCP
          securityContext:
            allowPrivilegeEscalation: false
            # 65534 is the conventional "nobody" uid/gid — run unprivileged
            runAsGroup: 65534
            runAsNonRoot: true
            runAsUser: 65534
          volumeMounts:
            - name: shared-volume
              # Mounting under /shared-volume is important as we reference this
              # path in our dashboard definition.
              mountPath: /shared-volume
              # Mount it readonly to prevent accidental writes
              readOnly: true
      securityContext:
        fsGroup: 65534
      volumes:
        # This is the volume that we will mount and monitor. You should reference
        # a shared volume containing home directories etc. This is often a PVC
        # bound to a PV referencing a NFS server.
        - name: shared-volume
          persistentVolumeClaim:
            claimName: home-nfs
{{- end }}
{{- end }}