diff --git a/kubernetes/helm/.helmignore b/kubernetes/helm/.helmignore new file mode 100644 index 00000000000..50af0317254 --- /dev/null +++ b/kubernetes/helm/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/kubernetes/helm/Chart.yaml b/kubernetes/helm/Chart.yaml new file mode 100644 index 00000000000..06dc123bbc4 --- /dev/null +++ b/kubernetes/helm/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +name: nebula +description: A distributed, fast open-source graph database + featuring horizontal scalability and high availability + https://nebula-graph.io +type: application +version: 0.1 +appVersion: 1.0.0-rc2 +keywords: +- graph-database +- distributed +- database +- graphdb +home: https://nebula-graph.io +maintainers: +- name: flyingcat + email: badgangkiller@gmail.com +engine: gotpl diff --git a/kubernetes/helm/templates/NOTES.txt b/kubernetes/helm/templates/NOTES.txt new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/kubernetes/helm/templates/NOTES.txt @@ -0,0 +1 @@ + diff --git a/kubernetes/helm/templates/_helpers.tpl b/kubernetes/helm/templates/_helpers.tpl new file mode 100644 index 00000000000..c0e4750ae42 --- /dev/null +++ b/kubernetes/helm/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "nebula.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "nebula.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nebula.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "nebula.labels" -}} +helm.sh/chart: {{ include "nebula.chart" . }} +{{ include "nebula.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "nebula.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nebula.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "nebula.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "nebula.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "metad.endpoints" -}} +{{- join "," .Values.MetadHosts -}} +{{- end -}} \ No newline at end of file diff --git a/kubernetes/helm/templates/configmap.yaml b/kubernetes/helm/templates/configmap.yaml new file mode 100644 index 00000000000..6ddce867e68 --- /dev/null +++ b/kubernetes/helm/templates/configmap.yaml @@ -0,0 +1,165 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nebula-graphd + namespace: {{ .Values.namespace }} +data: + nebula-graphd.conf: | + ########## basics ########## + # Whether to run as a daemon process + --daemonize=false + # The file to host the process id + --pid_file=pids/nebula-graphd.pid + ########## logging ########## + # The directory to host logging files, which must already exists + --log_dir=logs + # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively + --minloglevel=2 + # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging + --v=0 + # Maximum seconds to buffer the log messages + --logbufsecs=0 + # Whether to redirect stdout and stderr to separate output files + --redirect_stdout=true + # Destination filename of stdout and stderr, which will also reside in log_dir. + --stdout_log_file=stdout.log + --stderr_log_file=stderr.log + # Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively. + --stderrthreshold=2 + + ########## networking ########## + # Meta Server Address + --meta_server_addrs={{ template "metad.endpoints" . 
}} + # Local ip + --local_ip=0.0.0.0 + # Network device to listen on + --listen_netdev=any + # Port to listen on + --port={{ .Values.port.graphd.thriftPort }} + # To turn on SO_REUSEPORT or not + --reuse_port=false + # Backlog of the listen socket, adjust this together with net.core.somaxconn + --listen_backlog=1024 + # Seconds before the idle connections are closed, 0 for never closed + --client_idle_timeout_secs=0 + # Seconds before the idle sessions are expired, 0 for no expiration + --session_idle_timeout_secs=60000 + # The number of threads to accept incoming connections + --num_accept_threads=1 + # The number of networking IO threads, 0 for # of CPU cores + --num_netio_threads=0 + # The number of threads to execute user queries, 0 for # of CPU cores + --num_worker_threads=0 + # HTTP service ip + --ws_ip=0.0.0.0 + # HTTP service port + --ws_http_port={{ .Values.port.graphd.httpPort }} + # HTTP2 service port + --ws_h2_port={{ .Values.port.graphd.http2Port }} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nebula-metad + namespace: {{ .Values.namespace }} +data: + nebula-metad.conf: | + ########## basics ########## + # Whether to run as a daemon process + --daemonize=false + # The file to host the process id + --pid_file=pids/nebula-metad.pid + + ########## logging ########## + # The directory to host logging files, which must already exists + --log_dir=logs + # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively + --minloglevel=2 + # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging + --v=0 + # Maximum seconds to buffer the log messages + --logbufsecs=0 + + ########## networking ########## + # Meta Server Address + --meta_server_addrs={{ template "metad.endpoints" . 
}} + # Local ip + --local_ip=0.0.0.0 + # Meta daemon listening port + --port={{ .Values.port.metad.thriftPort }} + # HTTP service ip + --ws_ip=0.0.0.0 + # HTTP service port + --ws_http_port={{ .Values.port.metad.httpPort }} + # HTTP2 service port + --ws_h2_port={{ .Values.port.metad.http2Port }} + + ########## storage ########## + # Root data path, here should be only single path for metad + --data_path=data/meta + + ########## Misc ######### + # The default number of parts when a space is created + --default_parts_num=100 + # The default replica factor when a space is created + --default_replica_factor=1 + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nebula-storaged + namespace: {{ .Values.namespace }} +data: + nebula-storaged.conf: | + ########## basics ########## + # Whether to run as a daemon process + --daemonize=false + # The file to host the process id + --pid_file=pids/nebula-storaged.pid + + ########## logging ########## + # The directory to host logging files, which must already exists + --log_dir=logs + # Log level, 0, 1, 2, 3 for INFO, WARNING, ERROR, FATAL respectively + --minloglevel=2 + # Verbose log level, 1, 2, 3, 4, the higher of the level, the more verbose of the logging + --v=0 + # Maximum seconds to buffer the log messages + --logbufsecs=0 + ########## networking ########## + # Meta server address + --meta_server_addrs={{ template "metad.endpoints" . }} + # Local ip + --local_ip=0.0.0.0 + # Storage daemon listening port + --port={{ .Values.port.storaged.thriftPort }} + # HTTP service ip + --ws_ip=0.0.0.0 + # HTTP service port + --ws_http_port={{ .Values.port.storaged.httpPort }} + # HTTP2 service port + --ws_h2_port={{ .Values.port.storaged.http2Port }} + + ########## storage ########## + # Root data path, multiple paths should be splitted by comma. 
+ # One path per instance, if --engine_type is `rocksdb' + --data_path=data/storage + + # The default reserved bytes for one batch operation + --rocksdb_batch_size=4096 + # The default block cache size used in BlockBasedTable. + # The unit is MB. + --rocksdb_block_cache=4 + # The type of storage engine, `rocksdb', `memory', etc. + --engine_type=rocksdb + + ############## rocksdb Options ############## + --rocksdb_disable_wal=true + # rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma + --rocksdb_db_options={} + # rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma + --rocksdb_column_family_options={"write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"} + # rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma + --rocksdb_block_based_table_options={"block_size":"8192"} diff --git a/kubernetes/helm/templates/deployment.yaml b/kubernetes/helm/templates/deployment.yaml new file mode 100644 index 00000000000..8f1d46d8802 --- /dev/null +++ b/kubernetes/helm/templates/deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nebula-graphd + name: nebula-graphd + namespace: {{ .Values.namespace }} +spec: + selector: + matchLabels: + app: nebula-graphd + replicas: {{ .Values.replication.graphd.replicas }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: nebula-graphd + spec: + restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + containers: + - name: nebula-graphd + image: "{{ .Values.image.graphd.repository }}:{{ .Values.image.graphd.tag }}" + imagePullPolicy: {{ .Values.image.graphd.pullPolicy }} 
+ env: + - name: USER + value: root + resources: + requests: + cpu: {{ .Values.resources.graphd.requests.cpu | quote }} + memory: {{ .Values.resources.graphd.requests.memory | quote }} + limits: + cpu: {{ .Values.resources.graphd.limits.cpu | quote }} + memory: {{ .Values.resources.graphd.limits.memory | quote }} + ports: + - name: thrift + containerPort: {{ .Values.port.graphd.thriftPort }} + - name: http + containerPort: {{ .Values.port.graphd.httpPort }} + - name: http2 + containerPort: {{ .Values.port.graphd.http2Port }} + command: ["/usr/local/nebula/bin/nebula-graphd", "--flagfile=/usr/local/nebula/etc/nebula-graphd.conf", "--v=0", "--minloglevel=2", "--daemonize=false"] + {{- if .Values.livenessProbe.graphd.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.graphd.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end }} + resources: + #TODO: Change these to appropriate values for the hardware that you're running. + requests: + cpu: "{{ .Values.resources.graphd.requests.cpu }}" + memory: "{{ .Values.resources.graphd.requests.memory }}" + limits: + cpu: "{{ .Values.resources.graphd.limits.cpu }}" + memory: "{{ .Values.resources.graphd.limits.memory }}" + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: timezone + mountPath: /etc/localtime + volumes: + - name: timezone + hostPath: + path: /etc/localtime + - name: config + configMap: + name: nebula-graphd + + \ No newline at end of file diff --git a/kubernetes/helm/templates/ingress-configmap.yaml b/kubernetes/helm/templates/ingress-configmap.yaml new file mode 100644 index 00000000000..b03efdc216b --- /dev/null +++ b/kubernetes/helm/templates/ingress-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: graphd-services + namespace: {{ .Values.namespace }} +data: + "3699": "{{ .Values.namespace}}/nebula-graphd:{{ .Values.port.graphd.thriftPort }}" + "13000": "{{ .Values.namespace}}/nebula-graphd:{{ 
.Values.port.graphd.httpPort }}" + "13002": "{{ .Values.namespace}}/nebula-graphd:{{ .Values.port.graphd.http2Port }}" diff --git a/kubernetes/helm/templates/pdb.yaml b/kubernetes/helm/templates/pdb.yaml new file mode 100644 index 00000000000..acff368756c --- /dev/null +++ b/kubernetes/helm/templates/pdb.yaml @@ -0,0 +1,15 @@ +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: nebula-budget + labels: + app: nebula-budget +spec: + selector: + matchExpressions: + - key: app + operator: In + values: + - nebula-storaged + - nebula-metad + maxUnavailable: 1 \ No newline at end of file diff --git a/kubernetes/helm/templates/service.yaml b/kubernetes/helm/templates/service.yaml new file mode 100644 index 00000000000..2eebce8c3a4 --- /dev/null +++ b/kubernetes/helm/templates/service.yaml @@ -0,0 +1,67 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-metad + name: nebula-metad + namespace: {{ .Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + ports: + - name: thrift + port: {{ .Values.port.metad.thriftPort }} + - name: raft + port: {{ .Values.port.metad.raftPort }} + - name: http + port: {{ .Values.port.metad.httpPort }} + - name: http2 + port: {{ .Values.port.metad.http2Port }} + selector: + app: nebula-metad +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-storaged + name: nebula-storaged + namespace: {{ .Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + ports: + - name: thrift + port: {{ .Values.port.storaged.thriftPort }} + - name: raft + port: {{ .Values.port.storaged.raftPort }} + - name: http + port: {{ .Values.port.storaged.httpPort }} + - name: http2 + port: {{ .Values.port.storaged.http2Port }} + selector: + app: nebula-storaged +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: nebula-graphd + name: nebula-graphd + namespace: {{ 
.Values.namespace }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + publishNotReadyAddresses: true + type: NodePort + ports: + - name: thrift + port: {{ .Values.port.graphd.thriftPort }} + - name: http + port: {{ .Values.port.graphd.httpPort }} + - name: http2 + port: {{ .Values.port.graphd.http2Port }} + selector: + app: nebula-graphd \ No newline at end of file diff --git a/kubernetes/helm/templates/statefulset.yaml b/kubernetes/helm/templates/statefulset.yaml new file mode 100644 index 00000000000..c2c56484188 --- /dev/null +++ b/kubernetes/helm/templates/statefulset.yaml @@ -0,0 +1,190 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: nebula-metad + namespace: {{ .Values.namespace }} + labels: + app: nebula-metad +spec: + serviceName: nebula-metad + replicas: {{ .Values.replication.metad.replicas }} + selector: + matchLabels: + app: nebula-metad + template: + metadata: + labels: + app: nebula-metad + spec: + terminationGracePeriodSeconds: 60 + # hostname: meta.nebula.services + restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + hostNetwork: true + containers: + - name: nebula-metad + image: "{{ .Values.image.metad.repository }}:{{ .Values.image.metad.tag }}" + imagePullPolicy: {{ .Values.image.metad.pullPolicy }} + env: + - name: USER + value: root + resources: + requests: + cpu: {{ .Values.resources.metad.requests.cpu | quote }} + memory: {{ .Values.resources.metad.requests.memory | quote }} + limits: + cpu: {{ .Values.resources.metad.limits.cpu | quote }} + memory: {{ .Values.resources.metad.limits.memory | quote }} + ports: + - containerPort: {{ .Values.port.metad.thriftPort }} + name: thrift + - containerPort: {{ .Values.port.metad.raftPort }} + name: raft + - containerPort: {{ .Values.port.metad.httpPort }} + name: http + - containerPort: {{ .Values.port.metad.http2Port }} + name: http2 + command: + - "/bin/bash" + - "-ecx" + 
- "exec /usr/local/nebula/bin/nebula-metad --flagfile=/usr/local/nebula/etc/nebula-metad.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2 --daemonize=false" + {{- if .Values.livenessProbe.metad.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.metad.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end }} + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: data + mountPath: /usr/local/nebula/data + - name: timezone + mountPath: /etc/localtime + - name: log + mountPath: /usr/local/nebula/logs + volumes: + - name: config + configMap: + name: nebula-metad + - name: timezone + hostPath: + path: /etc/localtime + - name: data + persistentVolumeClaim: + claimName: data + - name: log + hostPath: + path: /var/log + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.storage.storageClass }} + resources: + requests: + storage: {{ .Values.storage.metad.size }} + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: nebula-storaged + labels: + app: nebula-storaged +spec: + serviceName: nebula-storaged + replicas: {{ .Values.replication.storaged.replicas }} + selector: + matchLabels: + app: nebula-storaged + template: + metadata: + labels: + app: nebula-storaged + spec: + terminationGracePeriodSeconds: 60 + restartPolicy: Always +{{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + hostNetwork: true + containers: + - name: nebula-storaged + image: "{{ .Values.image.storaged.repository }}:{{ .Values.image.storaged.tag }}" + imagePullPolicy: {{ .Values.image.storaged.pullPolicy }} + env: + - name: USER + value: root + resources: + requests: + cpu: {{ .Values.resources.storaged.requests.cpu | quote }} + memory: {{ .Values.resources.storaged.requests.memory | quote }} + limits: + cpu: {{ 
.Values.resources.storaged.limits.cpu | quote }} + memory: {{ .Values.resources.storaged.limits.memory | quote }} + ports: + - containerPort: {{ .Values.port.storaged.thriftPort }} + name: thrift + - containerPort: {{ .Values.port.storaged.raftPort }} + name: raft + - containerPort: {{ .Values.port.storaged.httpPort }} + name: http + - containerPort: {{ .Values.port.storaged.http2Port }} + name: http2 + command: + - "/bin/bash" + - "-ecx" + - "exec /usr/local/nebula/bin/nebula-storaged --flagfile=/usr/local/nebula/etc/nebula-storaged.conf --local_ip=$(hostname -i) --v=0 --minloglevel=2 --daemonize=false" + {{- if .Values.livenessProbe.storaged.Enable }} + livenessProbe: + httpGet: + path: /status + port: {{ .Values.port.storaged.httpPort }} + initialDelaySeconds: 30 + timeoutSeconds: 5 + {{- end}} + volumeMounts: + - name: config + mountPath: /usr/local/nebula/etc/ + - name: data + mountPath: /usr/local/nebula/data + - name: timezone + mountPath: /etc/localtime + - name: log + mountPath: /usr/local/nebula/logs + volumes: + - name: config + configMap: + name: nebula-storaged + - name: timezone + hostPath: + path: /etc/localtime + - name: data + persistentVolumeClaim: + claimName: data + - name: log + hostPath: + path: /var/log + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.storage.storageClass }} + resources: + requests: + storage: {{ .Values.storage.storaged.size }} diff --git a/kubernetes/helm/values.yaml b/kubernetes/helm/values.yaml new file mode 100644 index 00000000000..217a912a7c1 --- /dev/null +++ b/kubernetes/helm/values.yaml @@ -0,0 +1,108 @@ +# specify kubernetes namespace +namespace: default + +# the nebula images +image: + # the nebula-graphd images,change tag to use a different version. 
+ graphd: + repository: vesoft/nebula-graphd + tag: nightly + pullPolicy: Always + # the nebula-metad images,change tag to use a different version. + metad: + repository: vesoft/nebula-metad + tag: nightly + pullPolicy: Always + # the nebula storaged images,change tag to use a different version. + storaged: + repository: vesoft/nebula-storaged + tag: nightly + pullPolicy: Always + +# set livenessProbe, kubelet will check nebula pod alive +livenessProbe: + graphd: + Enable: true + metad: + Enable: true + storaged: + Enable: true + +## Optional resource requests and limits for the nebula container +resources: + graphd: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + + metad: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + + storaged: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 1000m + memory: 1024Mi + +replication: + ## Number of nebula-storaged replicas + storaged: + replicas: 3 + ## Number of nebula-metad replicas + metad: + replicas: 3 + ## Number of nebula-graphd replicas + graphd: + replicas: 3 + +port: + ## Service And Container Port Setting of Nebula-Graphd + graphd: + serviceType: ClusterIP + thriftPort: 3699 + httpPort: 13000 + http2Port: 13002 + ## Service And Container Port Setting of Nebula-Storaged + storaged: + serviceType: ClusterIP + thriftPort: 45600 + raftPort: 45601 + httpPort: 12000 + http2Port: 12002 + ## Service And Container Port Setting of Nebula-Metad + metad: + serviceType: ClusterIP + thriftPort: 44500 + raftPort: 44501 + httpPort: 11000 + http2Port: 11002 + +MetadHosts: + - 192.168.8.21:44500 + - 192.168.8.22:44500 + - 192.168.8.23:44500 + +## Storage Setting of Nebula-Metad and Nebula-Storaged +storage: + storageClass: fast-disks + storaged: + size: 20Gi + metad: + size: 20Gi + +nodeSelector: + nebula: "yes" + +tolerations: [] + +affinity: {}