From dd4ae856f98358710614bd20718e260cf660fe97 Mon Sep 17 00:00:00 2001 From: knightXun Date: Wed, 18 Dec 2019 16:25:58 +0800 Subject: [PATCH 1/6] helm chart for nebula parent 8fd1b51886b107967c52834df2d0f6aa321f9046 author knightXun <1004815462@qq.com> 1581852404 +0800 committer knightXun <1004815462@qq.com> 1581852404 +0800 helm chart for nebula --- kubernetes/helm/.helmignore | 22 +++ kubernetes/helm/Chart.yaml | 18 ++ kubernetes/helm/templates/NOTES.txt | 1 + kubernetes/helm/templates/_helpers.tpl | 67 +++++++ kubernetes/helm/templates/configmap.yaml | 165 ++++++++++++++++ kubernetes/helm/templates/deployment.yaml | 72 +++++++ .../helm/templates/ingress-configmap.yaml | 9 + kubernetes/helm/templates/pdb.yaml | 12 ++ kubernetes/helm/templates/service.yaml | 67 +++++++ kubernetes/helm/templates/statefulset.yaml | 184 ++++++++++++++++++ kubernetes/helm/values.yaml | 108 ++++++++++ 11 files changed, 725 insertions(+) create mode 100644 kubernetes/helm/.helmignore create mode 100644 kubernetes/helm/Chart.yaml create mode 100644 kubernetes/helm/templates/NOTES.txt create mode 100644 kubernetes/helm/templates/_helpers.tpl create mode 100644 kubernetes/helm/templates/configmap.yaml create mode 100644 kubernetes/helm/templates/deployment.yaml create mode 100644 kubernetes/helm/templates/ingress-configmap.yaml create mode 100644 kubernetes/helm/templates/pdb.yaml create mode 100644 kubernetes/helm/templates/service.yaml create mode 100644 kubernetes/helm/templates/statefulset.yaml create mode 100644 kubernetes/helm/values.yaml diff --git a/kubernetes/helm/.helmignore b/kubernetes/helm/.helmignore new file mode 100644 index 00000000000..50af0317254 --- /dev/null +++ b/kubernetes/helm/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/kubernetes/helm/Chart.yaml b/kubernetes/helm/Chart.yaml new file mode 100644 index 00000000000..06dc123bbc4 --- /dev/null +++ b/kubernetes/helm/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: nebula +description: A distributed, fast open-source graph database + featuring horizontal scalability and high availability + https://nebula-graph.io +type: application +version: 0.1 +appVersion: 1.0.0-rc2 +keywords: +- graph-database +- distributed +- database +- graphdb +home: https://nebula-graph.io +maintainers: +- name: flyingcat + email: badgangkiller@gmail.com +engine: gotpl diff --git a/kubernetes/helm/templates/NOTES.txt b/kubernetes/helm/templates/NOTES.txt new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/kubernetes/helm/templates/NOTES.txt @@ -0,0 +1 @@ + diff --git a/kubernetes/helm/templates/_helpers.tpl b/kubernetes/helm/templates/_helpers.tpl new file mode 100644 index 00000000000..c0e4750ae42 --- /dev/null +++ b/kubernetes/helm/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nebula.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "nebula.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nebula.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "nebula.labels" -}} +helm.sh/chart: {{ include "nebula.chart" . }} +{{ include "nebula.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "nebula.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nebula.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "nebula.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nebula.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "metad.endpoints" -}} +{{- join "," .Values.MetadHosts -}} +{{- end -}} \ No newline at end of file diff --git a/kubernetes/helm/templates/configmap.yaml b/kubernetes/helm/templates/configmap.yaml new file mode 100644 index 00000000000..6ddce867e68 --- /dev/null +++ b/kubernetes/helm/templates/configmap.yaml @@ -0,0 +1,165 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nebula-graphd + namespace: {{ .Values.namespace }} +data: + nebula-graphd.conf: | + ########## basics ########## + # Whether to run as a daemon process + --daemonize=false + # The file to host the process id + --pid_file=pids/nebula-graphd.pid + ########## logging ########## + # The directory to host logging files, which must already exists + --log_dir=logs + # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively + --minloglevel=2 + # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging + --v=0 + # Maximum seconds to buffer the log messages + --logbufsecs=0 + # Whether to redirect stdout and stderr to separate output files + --redirect_stdout=true + # Destination filename of stdout and stderr, which will also reside in log_dir. + --stdout_log_file=stdout.log + --stderr_log_file=stderr.log + # Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively. + --stderrthreshold=2 + + ########## networking ########## + # Meta Server Address + --meta_server_addrs={{ template "metad.endpoints" . 
}}
+    # Local ip
+    --local_ip=0.0.0.0
+    # Network device to listen on
+    --listen_netdev=any
+    # Port to listen on
+    --port={{ .Values.port.graphd.thriftPort }}
+    # To turn on SO_REUSEPORT or not
+    --reuse_port=false
+    # Backlog of the listen socket, adjust this together with net.core.somaxconn
+    --listen_backlog=1024
+    # Seconds before the idle connections are closed, 0 for never closed
+    --client_idle_timeout_secs=0
+    # Seconds before the idle sessions are expired, 0 for no expiration
+    --session_idle_timeout_secs=60000
+    # The number of threads to accept incoming connections
+    --num_accept_threads=1
+    # The number of networking IO threads, 0 for # of CPU cores
+    --num_netio_threads=0
+    # The number of threads to execute user queries, 0 for # of CPU cores
+    --num_worker_threads=0
+    # HTTP service ip
+    --ws_ip=0.0.0.0
+    # HTTP service port
+    --ws_http_port={{ .Values.port.graphd.httpPort }}
+    # HTTP2 service port
+    --ws_h2_port={{ .Values.port.graphd.http2Port }}
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nebula-metad
+  namespace: {{ .Values.namespace }}
+data:
+  nebula-metad.conf: |
+    ########## basics ##########
+    # Whether to run as a daemon process
+    --daemonize=false
+    # The file to host the process id
+    --pid_file=pids/nebula-metad.pid
+
+    ########## logging ##########
+    # The directory to host logging files, which must already exists
+    --log_dir=logs
+    # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
+    --minloglevel=2
+    # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
+    --v=0
+    # Maximum seconds to buffer the log messages
+    --logbufsecs=0
+
+    ########## networking ##########
+    # Meta Server Address
+    --meta_server_addrs={{ template "metad.endpoints" . }}
+    # Local ip
+    --local_ip=0.0.0.0
+    # Meta daemon listening port
+    --port={{ .Values.port.metad.thriftPort }}
+    # HTTP service ip
+    --ws_ip=0.0.0.0
+    # HTTP service port
+    --ws_http_port={{ .Values.port.metad.httpPort }}
+    # HTTP2 service port
+    --ws_h2_port={{ .Values.port.metad.http2Port }}
+
+    ########## storage ##########
+    # Root data path, here should be only single path for metad
+    --data_path=data/meta
+
+    ########## Misc #########
+    # The default number of parts when a space is created
+    --default_parts_num=100
+    # The default replica factor when a space is created
+    --default_replica_factor=1
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: nebula-storaged
+  namespace: {{ .Values.namespace }}
+data:
+  nebula-storaged.conf: |
+    ########## basics ##########
+    # Whether to run as a daemon process
+    --daemonize=false
+    # The file to host the process id
+    --pid_file=pids/nebula-storaged.pid
+
+    ########## logging ##########
+    # The directory to host logging files, which must already exists
+    --log_dir=logs
+    # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively
+    --minloglevel=2
+    # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging
+    --v=0
+    # Maximum seconds to buffer the log messages
+    --logbufsecs=0
+    ########## networking ##########
+    # Meta server address
+    --meta_server_addrs={{ template "metad.endpoints" . 
}} + # Local ip + --local_ip=0.0.0.0 + # Storage daemon listening port + --port={{ .Values.port.storaged.thriftPort }} + # HTTP service ip + --ws_ip=0.0.0.0 + # HTTP service port + --ws_http_port={{ .Values.port.storaged.httpPort }} + # HTTP2 service port + --ws_h2_port={{ .Values.port.storaged.http2Port }} + + ########## storage ########## + # Root data path, multiple paths should be splitted by comma. + # One path per instance, if --engine_type is `rocksdb' + --data_path=data/storage + + # The default reserved bytes for one batch operation + --rocksdb_batch_size=4096 + # The default block cache size used in BlockBasedTable. + # The unit is MB. + --rocksdb_block_cache=4 + # The type of storage engine, `rocksdb', `memory', etc. + --engine_type=rocksdb + + ############## rocksdb Options ############## + --rocksdb_disable_wal=true + # rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma + --rocksdb_db_options={} + # rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma + --rocksdb_column_family_options={"write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"} + # rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma + --rocksdb_block_based_table_options={"block_size":"8192"} diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml new file mode 100644 index 00000000000..efe66c73ff9 --- /dev/null +++ b/kubernetes/helm/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nebula-graphd + name: nebula-graphd + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: nebula-graphd + replicas: {{ .Values.replication.graphd.replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: nebula-graphd + spec: + restartPolicy: Always + containers: + - name: nebula-graphd + image: "{{ .Values.image.graphd.repository }}:{{ .Values.image.graphd.tag }}" + imagePullPolicy: {{ .Values.image.graphd.pullPolicy }} + resources: + requests: + cpu: {{ .Values.resources.graphd.requests.cpu | quote }} + memory: {{ .Values.resources.graphd.requests.memory | quote }} + limits: + cpu: {{ .Values.resources.graphd.limits.cpu | quote }} + memory: {{ .Values.resources.graphd.limits.memory | quote }} + ports: + - name: thrift + containerPort: {{ .Values.port.graphd.thriftPort }} + - name: http + containerPort: {{ .Values.port.graphd.httpPort }} + - name: http2 + containerPort: {{ .Values.port.graphd.http2Port }} + command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf"] + {{- if .Values.livenessProbe.graphd.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.graphd.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end }} + resources: + #TODO: Change these to appropriate values for the hardware that you're running. 
+ requests: + cpu: "{{ .Values.resources.graphd.requests.cpu }}" + memory: "{{ .Values.resources.graphd.requests.memory }}" + limits: + cpu: "{{ .Values.resources.graphd.limits.cpu }}" + memory: "{{ .Values.resources.graphd.limits.memory }}" + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: timezone + mountPath: /etc/localtime + volumes: + - name: timezone + hostPath: + path: /etc/localtime + - name: config + configMap: + name: nebula-graphd + + diff --git a/kubernetes/helm/templates/ingress-configmap.yaml b/kubernetes/helm/templates/ingress-configmap.yaml new file mode 100644 index 00000000000..b03efdc216b --- /dev/null +++ b/kubernetes/helm/templates/ingress-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: graphd-services + namespace: {{ .Values.namespace }} +data: + 3699: "{{ .Values.namespace}}/nebula-graphd:{{ .Values.port.graphd.thriftPort }}" + 13000: "{{ .Values.namespace}}/nebula-graphd:{{ .Values.port.graphd.httpPort }}" + 13002: "{{ .Values.namespace}}/nebula-graphd:{{ .Values.port.graphd.http2Port }}" diff --git a/kubernetes/helm/templates/pdb.yaml b/kubernetes/helm/templates/pdb.yaml new file mode 100644 index 00000000000..acff368756c --- /dev/null +++ b/kubernetes/helm/templates/pdb.yaml @@ -0,0 +1,12 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: nebula-budget + labels: + app: nebula-budget +spec: + selector: + matchLabels: + app: nebula-storaged + app: nebula-metad + maxUnavailable: 1 \ No newline at end of file diff --git a/kubernetes/helm/templates/service.yaml b/kubernetes/helm/templates/service.yaml new file mode 100644 index 00000000000..2eebce8c3a4 --- /dev/null +++ b/kubernetes/helm/templates/service.yaml @@ -0,0 +1,67 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-metad + name: nebula-metad + namespace: {{ .Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + ports: + - name: thrift + port: {{ .Values.port.metad.thriftPort }} + - name: raft + port: {{ .Values.port.metad.raftPort }} + - name: http + port: {{ .Values.port.metad.httpPort }} + - name: http2 + port: {{ .Values.port.metad.http2Port }} + selector: + app: nebula-metad +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-storaged + name: nebula-storaged + namespace: {{ .Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + ports: + - name: thrift + port: {{ .Values.port.storaged.thriftPort }} + - name: raft + port: {{ .Values.port.storaged.raftPort }} + - name: http + port: {{ .Values.port.storaged.httpPort }} + - name: http2 + port: {{ .Values.port.storaged.http2Port }} + selector: + app: nebula-storaged +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-graphd + name: nebula-graphd + namespace: {{ .Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + type: NodePort + ports: + - name: thrift + port: {{ .Values.port.graphd.thriftPort }} + - name: http + port: {{ .Values.port.graphd.httpPort }} + - name: http2 + port: {{ .Values.port.graphd.http2Port }} + selector: + app: nebula-graphd \ No newline at end of file diff --git a/kubernetes/helm/templates/statefulset.yaml b/kubernetes/helm/templates/statefulset.yaml new file mode 100644 index 00000000000..5a523f6aa73 --- /dev/null +++ 
b/kubernetes/helm/templates/statefulset.yaml @@ -0,0 +1,184 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: nebula-metad + namespace: {{ .Values.namespace }} + labels: + app: nebula-metad +spec: + serviceName: nebula-metad + replicas: {{ .Values.replication.metad.replicas }} + selector: + matchLabels: + app: nebula-metad + template: + metadata: + labels: + app: nebula-metad + spec: + terminationGracePeriodSeconds: 60 + # hostname: meta.nebula.services + restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + hostNetwork: true + containers: + - name: nebula-metad + image: "{{ .Values.image.metad.repository }}:{{ .Values.image.metad.tag }}" + imagePullPolicy: {{ .Values.image.metad.pullPolicy }} + resources: + requests: + cpu: {{ .Values.resources.metad.requests.cpu | quote }} + memory: {{ .Values.resources.metad.requests.memory | quote }} + limits: + cpu: {{ .Values.resources.metad.limits.cpu | quote }} + memory: {{ .Values.resources.metad.limits.memory | quote }} + ports: + - containerPort: {{ .Values.port.metad.thriftPort }} + name: thrift + - containerPort: {{ .Values.port.metad.raftPort }} + name: raft + - containerPort: {{ .Values.port.metad.httpPort }} + name: http + - containerPort: {{ .Values.port.metad.http2Port }} + name: http2 + command: + - "/bin/bash" + - "-ecx" + - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --local_ip=$(hostname -i)" + {{- if .Values.livenessProbe.graphd.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.metad.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end }} + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: data + mountPath: /usr/local/nebula/data + - name: timezone + mountPath: /etc/localtime + - name: log + mountPath: /usr/local/nebula/logs + volumes: + - name: config + configMap: + name: nebula-metad + - name: timezone + hostPath: + path: /etc/localtime + - name: data + persistentVolumeClaim: + claimName: data + - name: log + hostPath: + path: /var/log + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.storage.storageClass }} + resources: + requests: + storage: {{ .Values.storage.metad.size }} + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: nebula-storaged + labels: + app: nebula-storaged +spec: + serviceName: nebula-storaged + replicas: {{ .Values.replication.storaged.replicas }} + selector: + matchLabels: + app: nebula-storaged + template: + metadata: + labels: + app: nebula-storaged + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + hostNetwork: true + containers: + - name: nebula-storaged + image: "{{ .Values.image.storaged.repository }}:{{ .Values.image.storaged.tag }}" + imagePullPolicy: {{ .Values.image.storaged.pullPolicy }} + resources: + requests: + cpu: {{ .Values.resources.storaged.requests.cpu | quote }} + memory: {{ .Values.resources.storaged.requests.memory | quote }} + limits: + cpu: {{ .Values.resources.storaged.limits.cpu | quote }} + memory: {{ .Values.resources.storaged.limits.memory | quote }} + ports: + - containerPort: {{ .Values.port.storaged.thriftPort }} + name: thrift + - containerPort: {{ .Values.port.storaged.raftPort }} + name: raft 
+ - containerPort: {{ .Values.port.storaged.httpPort }} + name: http + - containerPort: {{ .Values.port.storaged.http2Port }} + name: http2 + command: + - "/bin/bash" + - "-ecx" + - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i)" + {{- if .Values.livenessProbe.graphd.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.storaged.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end}} + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: data + mountPath: /usr/local/nebula/data + - name: timezone + mountPath: /etc/localtime + - name: log + mountPath: /usr/local/nebula/logs + volumes: + - name: config + configMap: + name: nebula-storaged + - name: timezone + hostPath: + path: /etc/localtime + - name: data + persistentVolumeClaim: + claimName: data + - name: log + hostPath: + path: /var/log + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.storage.storageClass }} + resources: + requests: + storage: {{ .Values.storage.storaged.size }} diff --git a/kubernetes/helm/values.yaml b/kubernetes/helm/values.yaml new file mode 100644 index 00000000000..b4b38e2674a --- /dev/null +++ b/kubernetes/helm/values.yaml @@ -0,0 +1,108 @@ +# specify kubernetes namespace +namespace: default + +# the nebula images +image: + # the nebula-graphd images,change tag to use a different version. + graphd: + repository: vesoft/nebula-graphd + tag: nightly + pullPolicy: Always + # the nebula-metad images,change tag to use a different version. + metad: + repository: vesoft/nebula-metad + tag: nightly + pullPolicy: Always + # the nebula storaged images,change tag to use a different version. 
+ storaged: + repository: vesoft/nebula-storaged + tag: nightly + pullPolicy: Always + +# set livenessProbe, kubelet will check nebula pod alive +livenessProbe: + graphd: + Enable: true + metad: + Enable: true + storaged: + Enable: true + +## Optional resource requests and limits for the nebula container +resources: + graphd: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + + metad: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + + storaged: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + +replication: + ## Number of nebula-storaged replicas + storaged: + replicas: 3 + ## Number of nebula-metad replicas + metad: + replicas: 3 + ## Number of nebula-graphd replicas + graphd: + replicas: 3 + +port: + ## Service And Container Port Setting of Nebula-Graphd + graphd: + serviceType: ClusterIP + thriftPort: 3699 + httpPort: 13000 + http2Port: 13002 + ## Service And Container Port Setting of Nebula-Storaged + storaged: + serviceType: ClusterIP + thriftPort: 45500 + raftPort: 45501 + httpPort: 12000 + http2Port: 12002 + ## Service And Container Port Setting of Nebula-Metad + metad: + serviceType: ClusterIP + thriftPort: 44500 + raftPort: 44501 + httpPort: 11000 + http2Port: 11002 + +MetadHosts: + - 192.168.8.21:44500 + - 192.168.8.22:44500 + - 192.168.8.23:44500 + +## Storage Setting of Nebula-Metad and Nebula-Storaged +storage: + storageClass: fast-disks + storaged: + size: 20Gi + metad: + size: 20Gi + +nodeSelector: + nebula: "yes" + +tolerations: [] + +affinity: {} From 8a61361779939d1b04697632419f87dc69fbcf67 Mon Sep 17 00:00:00 2001 From: knightXun <1004815462@qq.com> Date: Tue, 18 Feb 2020 23:00:54 +0800 Subject: [PATCH 2/6] add nodeselector --- kubernetes/helm/templates/deployment.yaml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml index efe66c73ff9..cc767802288 100644 --- a/kubernetes/helm/templates/deployment.yaml +++ b/kubernetes/helm/templates/deployment.yaml @@ -21,6 +21,10 @@ spec: app: nebula-graphd spec: restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} containers: - name: nebula-graphd image: "{{ .Values.image.graphd.repository }}:{{ .Values.image.graphd.tag }}" @@ -59,14 +63,9 @@ spec: volumeMounts: - name: config mountPath: /usr/local/nebula/etc/ - - name: timezone - mountPath: /etc/localtime volumes: - - name: timezone - hostPath: - path: /etc/localtime - name: config configMap: name: nebula-graphd - + \ No newline at end of file From cdbc2ef6e17429890667c01f5155dbbad954bf0a Mon Sep 17 00:00:00 2001 From: knightXun <1004815462@qq.com> Date: Tue, 18 Feb 2020 23:02:20 +0800 Subject: [PATCH 3/6] add time volume --- kubernetes/helm/templates/deployment.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml index cc767802288..f272f0c3694 100644 --- a/kubernetes/helm/templates/deployment.yaml +++ b/kubernetes/helm/templates/deployment.yaml @@ -63,7 +63,12 @@ spec: volumeMounts: - name: config mountPath: /usr/local/nebula/etc/ + - name: timezone + mountPath: /etc/localtime volumes: + - name: timezone + hostPath: + path: /etc/localtime - name: config configMap: name: nebula-graphd From 59b580ff1483583b73e48586d799bb52f87cdda1 Mon Sep 17 00:00:00 2001 From: knightXun <1004815462@qq.com> Date: Mon, 2 Mar 
2020 12:46:50 +0800 Subject: [PATCH 4/6] add log flags --- kubernetes/helm/templates/deployment.yaml | 2 +- kubernetes/helm/templates/statefulset.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml index f272f0c3694..993f0a34ec8 100644 --- a/kubernetes/helm/templates/deployment.yaml +++ b/kubernetes/helm/templates/deployment.yaml @@ -43,7 +43,7 @@ spec: containerPort: {{ .Values.port.graphd.httpPort }} - name: http2 containerPort: {{ .Values.port.graphd.http2Port }} - command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf"] + command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf", "--v=0", "--minloglevel=2"] {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: diff --git a/kubernetes/helm/templates/statefulset.yaml b/kubernetes/helm/templates/statefulset.yaml index 5a523f6aa73..9cdb7cab0fe 100644 --- a/kubernetes/helm/templates/statefulset.yaml +++ b/kubernetes/helm/templates/statefulset.yaml @@ -47,7 +47,7 @@ spec: command: - "/bin/bash" - "-ecx" - - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --local_ip=$(hostname -i)" + - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2" {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: @@ -139,7 +139,7 @@ spec: command: - "/bin/bash" - "-ecx" - - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i)" + - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2" {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: From df1e77e9c8c9e2228e2cfdcb2628cafe4e9624eb Mon Sep 17 00:00:00 2001 From: knightXun <1004815462@qq.com> Date: Mon, 2 Mar 2020 12:52:13 +0800 Subject: [PATCH 5/6] add --daemonize=false --- kubernetes/helm/templates/deployment.yaml | 2 +- kubernetes/helm/templates/statefulset.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml index 993f0a34ec8..9f1da7a6464 100644 --- a/kubernetes/helm/templates/deployment.yaml +++ b/kubernetes/helm/templates/deployment.yaml @@ -43,7 +43,7 @@ spec: containerPort: {{ .Values.port.graphd.httpPort }} - name: http2 containerPort: {{ .Values.port.graphd.http2Port }} - command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf", "--v=0", "--minloglevel=2"] + command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf", "--v=0", "--minloglevel=2", "--daemonize=false"] {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: diff --git a/kubernetes/helm/templates/statefulset.yaml b/kubernetes/helm/templates/statefulset.yaml index 9cdb7cab0fe..22ea4bdac22 100644 --- a/kubernetes/helm/templates/statefulset.yaml +++ b/kubernetes/helm/templates/statefulset.yaml @@ -47,7 +47,7 @@ spec: command: - "/bin/bash" - "-ecx" - - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2" + - "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf 
--local_ip=$(hostname -i) --v=0 --minloglevel=2 --daemonize=false" {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: @@ -139,7 +139,7 @@ spec: command: - "/bin/bash" - "-ecx" - - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2" + - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2 --daemonize=false" {{- if .Values.livenessProbe.graphd.Enable }} livenessProbe: httpGet: From ae0d65b951abc30babbdc71f733d0128902a8ade Mon Sep 17 00:00:00 2001 From: knightXun <1004815462@qq.com> Date: Mon, 2 Mar 2020 15:28:48 +0800 Subject: [PATCH 6/6] add default user --- kubernetes/helm/templates/deployment.yaml | 3 +++ kubernetes/helm/templates/statefulset.yaml | 6 ++++++ kubernetes/helm/values.yaml | 4 ++-- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml index 9f1da7a6464..8f1d46d8802 100644 --- a/kubernetes/helm/templates/deployment.yaml +++ b/kubernetes/helm/templates/deployment.yaml @@ -29,6 +29,9 @@ spec: - name: nebula-graphd image: "{{ .Values.image.graphd.repository }}:{{ .Values.image.graphd.tag }}" imagePullPolicy: {{ .Values.image.graphd.pullPolicy }} + env: + - name: USER + value: root resources: requests: cpu: {{ .Values.resources.graphd.requests.cpu | quote }} diff --git a/kubernetes/helm/templates/statefulset.yaml b/kubernetes/helm/templates/statefulset.yaml index 22ea4bdac22..c2c56484188 100644 --- a/kubernetes/helm/templates/statefulset.yaml +++ b/kubernetes/helm/templates/statefulset.yaml @@ -28,6 +28,9 @@ spec: - name: nebula-metad image: "{{ .Values.image.metad.repository }}:{{ .Values.image.metad.tag }}" imagePullPolicy: {{ .Values.image.metad.pullPolicy }} + env: + - name: USER + value: root resources: requests: cpu: {{ .Values.resources.metad.requests.cpu | quote }} @@ -120,6 +123,9 @@ spec: - name: nebula-storaged image: "{{ .Values.image.storaged.repository }}:{{ .Values.image.storaged.tag }}" imagePullPolicy: {{ .Values.image.storaged.pullPolicy }} + env: + - name: USER + value: root resources: requests: cpu: {{ .Values.resources.storaged.requests.cpu | quote }} diff --git a/kubernetes/helm/values.yaml b/kubernetes/helm/values.yaml index b4b38e2674a..217a912a7c1 100644 --- a/kubernetes/helm/values.yaml +++ b/kubernetes/helm/values.yaml @@ -75,8 +75,8 @@ port: ## Service And Container Port Setting of Nebula-Storaged storaged: serviceType: ClusterIP - thriftPort: 45500 - raftPort: 45501 + thriftPort: 45600 + raftPort: 45601 httpPort: 12000 http2Port: 12002 ## Service And Container Port Setting of Nebula-Metad
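
For anyone trying the chart out, a minimal usage sketch (not part of the patch series; node names and IP addresses below are placeholders): the metad and storaged StatefulSets run with hostNetwork: true and all pods use the default nodeSelector nebula: "yes" from values.yaml, so the target nodes need that label, and MetadHosts must be overridden with the real host IPs plus the metad thriftPort (44500 by default).

# Label the nodes that should run nebula pods (matches the chart's default nodeSelector).
kubectl label node <node-1> <node-2> <node-3> nebula="yes"

# Point MetadHosts at those nodes' IPs (placeholder addresses shown).
cat > nebula-values.yaml <<'EOF'
MetadHosts:
  - 10.0.0.1:44500
  - 10.0.0.2:44500
  - 10.0.0.3:44500
EOF

# Helm 2-era CLI, matching the chart's apiVersion: v1; with Helm 3 the release
# name is positional: helm install nebula ./kubernetes/helm -f nebula-values.yaml
helm install --name nebula ./kubernetes/helm -f nebula-values.yaml

Once the pods are ready, the nebula-graphd Service is of type NodePort, so the graph thrift port (3699 by default) is reachable on the nodes for the console.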