v3 support (#121)
* v3 support
MegaByte875 authored Mar 31, 2022
1 parent 7297f63 commit facd74e
Showing 15 changed files with 506 additions and 200 deletions.
8 changes: 8 additions & 0 deletions apis/apps/v1alpha1/nebulacluster.go
@@ -80,3 +80,11 @@ func (nc *NebulaCluster) IsPVReclaimEnabled() bool {
}
return *enabled
}

func (nc *NebulaCluster) IsAutoBalanceEnabled() bool {
enabled := nc.Spec.Storaged.EnableAutoBalance
if enabled == nil {
return false
}
return *enabled
}
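A minimal sketch of how a reconciler might consume the new accessor after a storaged scale-out. This is not the operator's actual controller code: the import path is inferred from the file paths in this diff, and submitBalanceJob is a hypothetical stand-in.

```go
package controller

import (
	"github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1"
)

// submitBalanceJob is a hypothetical stand-in for whatever submits a
// BALANCE DATA job to metad; it is not part of this commit.
func submitBalanceJob(nc *v1alpha1.NebulaCluster) error { return nil }

// syncStoragedAfterScaleOut gates balancing on the new accessor:
// a nil or false EnableAutoBalance leaves balancing to the user.
func syncStoragedAfterScaleOut(nc *v1alpha1.NebulaCluster) error {
	if !nc.IsAutoBalanceEnabled() {
		return nil
	}
	return submitBalanceJob(nc)
}
```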
3 changes: 2 additions & 1 deletion apis/apps/v1alpha1/nebulacluster_metad.go
@@ -245,7 +245,8 @@ func (c *metadComponent) GenerateVolumeMounts() []corev1.VolumeMount {
mounts = append(mounts, corev1.VolumeMount{
Name: "nebula-license",
ReadOnly: true,
MountPath: "/usr/local/nebula/share/resources",
MountPath: "/usr/local/nebula/share/nebula.license",
SubPath: "nebula.license",
})
}

2 changes: 1 addition & 1 deletion apis/apps/v1alpha1/nebulacluster_storaged.go
@@ -329,5 +329,5 @@ func (c *storagedComponent) GenerateConfigMap() *corev1.ConfigMap {
}

func (c *storagedComponent) UpdateComponentStatus(status *ComponentStatus) {
c.nc.Status.Storaged = *status
c.nc.Status.Storaged.ComponentStatus = *status
}
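This one-line change compiles because StoragedStatus (introduced in nebulacluster_types.go below) embeds ComponentStatus, so the embedded part must now be assigned explicitly. A self-contained toy showing the mechanics — the types are simplified stand-ins, not the real ones:

```go
package main

import "fmt"

// Toy copies of the types involved, just to show the embedding mechanics.
type ComponentStatus struct{ Version string }

type StoragedStatus struct {
	ComponentStatus // embedded: assigned as a whole via its type name
	HostsAdded      bool
}

func main() {
	var s StoragedStatus
	// The analogue of c.nc.Status.Storaged.ComponentStatus = *status:
	s.ComponentStatus = ComponentStatus{Version: "v3.0.0"}
	fmt.Println(s.Version, s.HostsAdded) // promoted field reads through: "v3.0.0 false"
}
```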
17 changes: 15 additions & 2 deletions apis/apps/v1alpha1/nebulacluster_types.go
@@ -98,7 +98,7 @@ type NebulaClusterSpec struct {
type NebulaClusterStatus struct {
Graphd ComponentStatus `json:"graphd,omitempty"`
Metad ComponentStatus `json:"metad,omitempty"`
Storaged ComponentStatus `json:"storaged,omitempty"`
Storaged StoragedStatus `json:"storaged,omitempty"`
Conditions []NebulaClusterCondition `json:"conditions,omitempty"`
}

@@ -109,6 +109,19 @@ type ComponentStatus struct {
Workload WorkloadStatus `json:"workload,omitempty"`
}

// StoragedStatus describes the status and version of nebula storaged.
type StoragedStatus struct {
ComponentStatus `json:",omitempty,inline"`
HostsAdded bool `json:"hostsAdded,omitempty"`
LastBalanceJob *BalanceJob `json:"lastBalanceJob,omitempty"`
}

// BalanceJob describes the admin job for balancing data.
type BalanceJob struct {
Space string `json:"space,omitempty"`
JobID int32 `json:"jobID,omitempty"`
}
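One plausible use of the new status fields is making balancing idempotent across reconcile loops: remember the submitted job so later reconciles poll it instead of resubmitting. A sketch under that assumption — recordBalanceJob is hypothetical, not part of this commit:

```go
// recordBalanceJob stores the job metad handed back so a later reconcile
// can poll it rather than submit a duplicate. Hypothetical helper that
// would live alongside the types above.
func recordBalanceJob(status *StoragedStatus, space string, jobID int32) {
	if status.LastBalanceJob != nil && status.LastBalanceJob.JobID == jobID {
		return // already recorded
	}
	status.LastBalanceJob = &BalanceJob{Space: space, JobID: jobID}
}
```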

// WorkloadStatus describes the status of a specified workload.
type WorkloadStatus struct {
// ObservedGeneration is the most recent generation observed for this Workload. It corresponds to the
@@ -236,7 +249,7 @@ type StoragedSpec struct {
// +optional
DataVolumeClaim *StorageClaim `json:"dataVolumeClaim,omitempty"`

// Flag to enable/disable auto balance data and leader while the nebula storaged scale out , default false
// Flag to enable/disable automatic data and leader balancing when nebula storaged scales out, default false
// +optional
EnableAutoBalance *bool `json:"enableAutoBalance,omitempty"`
}
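EnableAutoBalance is a *bool rather than a bool so an unset field stays distinguishable from an explicit false; IsAutoBalanceEnabled in nebulacluster.go above maps nil to false. A small sketch of opting in from client code (hypothetical helper, assuming Spec.Storaged is addressable the way that accessor implies):

```go
// enableAutoBalance opts a cluster into automatic balancing;
// leaving the field nil keeps the default behaviour (off).
func enableAutoBalance(nc *v1alpha1.NebulaCluster) {
	yes := true
	nc.Spec.Storaged.EnableAutoBalance = &yes
}
```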
148 changes: 123 additions & 25 deletions apis/apps/v1alpha1/template.go
@@ -47,6 +47,8 @@ const (
--stderr_log_file=graphd-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp
--timestamp_in_logfile_name=true
########## query ##########
# Whether to treat partial success as an error.
@@ -70,10 +72,11 @@ const (
--reuse_port=false
# Backlog of the listen socket, adjust this together with net.core.somaxconn
--listen_backlog=1024
# Seconds before the idle connections are closed, 0 for never closed
--client_idle_timeout_secs=0
# Seconds before the idle sessions are expired, 0 for no expiration
--session_idle_timeout_secs=60000
# The number of seconds Nebula service waits before closing the idle connections
--client_idle_timeout_secs=28800
# The number of seconds before idle sessions expire
# The range should be in [1, 604800]
--session_idle_timeout_secs=28800
# The number of threads to accept incoming connections
--num_accept_threads=1
# The number of networking IO threads, 0 for # of CPU cores
@@ -84,8 +87,6 @@ const (
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19669
# HTTP2 service port
--ws_h2_port=19670
# storage client timeout
--storage_client_timeout_ms=60000
# Port to listen on Meta with HTTP protocol, it corresponds to ws_http_port in metad's configuration file
@@ -98,8 +99,64 @@ const (
--auth_type=password
########## memory ##########
# System memory high watermark ratio
--system_memory_high_watermark_ratio=1
# System memory high watermark ratio; memory checking is disabled when the ratio is greater than 1.0
--system_memory_high_watermark_ratio=1.0
########## audit ##########
# This variable is used to enable audit. The value can be 'true' or 'false'.
--enable_audit=false
# This variable is used to configure where the audit log will be written. Optional: [ file | es ]
# If it is set to 'file', the log will be written into a file specified by audit_log_file variable.
# If it is set to 'es', the audit log will be written to Elasticsearch.
--audit_log_handler=file
# This variable is used to specify the filename that’s going to store the audit log.
# It can contain the path relative to the install dir or absolute path.
# This variable has effect only when audit_log_handler is set to 'file'.
--audit_log_file=./logs/audit/audit.log
# This variable is used to specify the audit log strategy. Optional: [ asynchronous | synchronous ]
# asynchronous: log using memory buffer, do not block the main thread
# synchronous: log directly to file, flush and sync every event
# Caution: For performance reasons, when the buffer is full and has not been flushed to the disk,
# the 'asynchronous' mode will discard subsequent requests.
# This variable has effect only when audit_log_handler is set to 'file'.
--audit_log_strategy=synchronous
# This variable can be used to specify the size of the memory buffer used for logging,
# used when the audit_log_strategy variable is set to 'asynchronous'.
# This variable has effect only when audit_log_handler is set to 'file'. Unit: B
--audit_log_max_buffer_size=1048576
# This variable is used to specify the audit log format. Supports three log formats [ xml | json | csv ]
# This variable has effect only when audit_log_handler is set to 'file'.
--audit_log_format=xml
# This variable can be used to specify the comma-separated list of Elasticsearch addresses,
# e.g. '192.168.0.1:7001, 192.168.0.2:7001'.
# This variable has effect only when audit_log_handler is set to 'es'.
--audit_log_es_address=
# This variable can be used to specify the username for Elasticsearch.
# This variable has effect only when audit_log_handler is set to 'es'.
--audit_log_es_user=
# This variable can be used to specify the user password for Elasticsearch.
# This variable has effect only when audit_log_handler is set to 'es'.
--audit_log_es_password=
# This variable can be used to specify the number of logs which are sent to Elasticsearch at one time.
# This variable has effect only when audit_log_handler is set to 'es'.
--audit_log_es_batch_size=1000
# This variable is used to specify the list of spaces excluded from tracking.
# The value can be a comma-separated list of spaces, e.g. 'nba, basketball'.
--audit_log_exclude_spaces=
# This variable is used to specify the list of log categories for tracking, e.g. 'login, ddl'.
# There are eight categories: [ login | exit | ddl | dql | dml | dcl | util | unknown ].
--audit_log_categories=login,exit
########## metrics ##########
--enable_space_level_metrics=false
########## experimental feature ##########
# Whether to use experimental features
--enable_experimental_feature=false
########## session ##########
# Maximum number of sessions that can be created per IP and per user
--max_sessions_per_ip_per_user=300
`
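For context on the retuned idle-timeout defaults above, the new values are whole-unit durations (arithmetic only, not part of the commit):

$$28800\ \mathrm{s} = 8\ \mathrm{h},\qquad 604800\ \mathrm{s} = 7\ \mathrm{days}$$

so connections and sessions now idle out after 8 hours, and the documented session range tops out at one week.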
// nolint: revive
MetadhConfigTemplate = `
@@ -108,6 +165,7 @@ const (
--daemonize=true
# The file to host the process id
--pid_file=pids/nebula-metad.pid
--license-path=share/nebula.license
########## logging ##########
# The directory to host logging files
@@ -125,6 +183,8 @@ const (
--stderr_log_file=metad-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true
########## networking ##########
# Comma separated Meta Server addresses
@@ -139,8 +199,6 @@ const (
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19559
# HTTP2 service port
--ws_h2_port=19560
# Port to listen on Storage with HTTP protocol, it corresponds to ws_http_port in storage's configuration file
--ws_storage_http_port=19779
@@ -155,6 +213,7 @@ const (
--default_replica_factor=1
--heartbeat_interval_secs=10
--agent_heartbeat_interval_secs=60
############## rocksdb Options ##############
--rocksdb_wal_sync=true
@@ -185,6 +244,8 @@ const (
--stderr_log_file=storaged-stderr.log
# Copy log messages at or above this level to stderr in addition to logfiles. The numbers of severity levels INFO, WARNING, ERROR, and FATAL are 0, 1, 2, and 3, respectively.
--stderrthreshold=2
# Whether logging files' names contain a timestamp.
--timestamp_in_logfile_name=true
########## networking ##########
# Comma separated Meta server addresses
@@ -199,8 +260,6 @@ const (
--ws_ip=0.0.0.0
# HTTP service port
--ws_http_port=19779
# HTTP2 service port
--ws_h2_port=19780
# heartbeat with meta service
--heartbeat_interval_secs=10
@@ -213,7 +272,7 @@ const (
--wal_ttl=14400
########## Disk ##########
# Root data path. Split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
# Root data path, split by comma. e.g. --data_path=/disk1/path1/,/disk2/path2/
# One path per Rocksdb instance.
--data_path=data/storage
@@ -225,8 +284,9 @@ const (
# The default block cache size used in BlockBasedTable.
# The unit is MB.
--rocksdb_block_cache=4
# The type of storage engine: rocksdb, memory, etc.
--engine_type=rocksdb
# Disable page cache to better control memory used by rocksdb.
# Caution: Make sure to allocate enough block cache if disabling page cache!
--disable_page_cache=false
# Compression algorithm, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
# For the sake of binary compatibility, the default value is snappy.
@@ -242,6 +302,14 @@ const (
# In order to disable compression for level 0/1, set it to "no:no"
--rocksdb_compression_per_level=
############## rocksdb Options ##############
# rocksdb DBOptions in json, each option's name and value are strings, given as "option_name":"option_value" and separated by commas
--rocksdb_db_options={"max_subcompactions":"4","max_background_jobs":"4"}
# rocksdb ColumnFamilyOptions in json, each option's name and value are strings, given as "option_name":"option_value" and separated by commas
--rocksdb_column_family_options={"disable_auto_compactions":"false","write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
# rocksdb BlockBasedTableOptions in json, each option's name and value are strings, given as "option_name":"option_value" and separated by commas
--rocksdb_block_based_table_options={"block_size":"8192"}
# Whether or not to enable rocksdb's statistics, disabled by default
--enable_rocksdb_statistics=false
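Decoding the power-of-two byte counts in the JSON options above (arithmetic mine, not part of the commit):

$$67108864\ \mathrm{B} = 64\ \mathrm{MiB},\qquad 268435456\ \mathrm{B} = 256\ \mathrm{MiB},\qquad 8192\ \mathrm{B} = 8\ \mathrm{KiB}$$

i.e. a 64 MiB write buffer, a 256 MiB base size for level 1, and 8 KiB table blocks.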
@@ -253,19 +321,49 @@ const (
# * kAll, Collect all stats
--rocksdb_stats_level=kExceptHistogramOrTimers
# Whether or not to enable rocksdb's prefix bloom filter, disabled by default.
--enable_rocksdb_prefix_filtering=false
# Whether or not to enable rocksdb's prefix bloom filter, enabled by default.
--enable_rocksdb_prefix_filtering=true
# Whether or not to enable rocksdb's whole key bloom filter, disabled by default.
--enable_rocksdb_whole_key_filtering=false
############## rocksdb Options ##############
# rocksdb DBOptions in json, each name and value of option is a string, given as "option_name":"option_value" separated by comma
--rocksdb_db_options={}
# rocksdb ColumnFamilyOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
--rocksdb_column_family_options={"write_buffer_size":"67108864","max_write_buffer_number":"4","max_bytes_for_level_base":"268435456"}
# rocksdb BlockBasedTableOptions in json, each name and value of option is string, given as "option_name":"option_value" separated by comma
--rocksdb_block_based_table_options={"block_size":"8192"}
############## Key-Value separation ##############
# Whether or not to enable BlobDB (RocksDB key-value separation support)
--rocksdb_enable_kv_separation=false
# RocksDB key value separation threshold in bytes. Values at or above this threshold will be written to blob files during flush or compaction.
--rocksdb_kv_separation_threshold=100
# Compression algorithm for blobs, options: no,snappy,lz4,lz4hc,zlib,bzip2,zstd
--rocksdb_blob_compression=lz4
# Whether to garbage collect blobs during compaction
--rocksdb_enable_blob_garbage_collection=true
############## storage cache ##############
# Whether to enable storage cache
--enable_storage_cache=false
# Total capacity reserved for storage in memory cache in MB
--storage_cache_capacity=0
# Number of buckets in base 2 logarithm. E.g., in case of 20, the total number of buckets will be 2^20.
# A good estimate can be ceil(log2(cache_entries * 1.6)). The maximum allowed is 32.
--storage_cache_buckets_power=20
# Number of locks in base 2 logarithm. E.g., in case of 10, the total number of locks will be 2^10.
# A good estimate can be max(1, buckets_power - 10). The maximum allowed is 32.
--storage_cache_locks_power=10
# Whether to add vertex pool in cache. Only valid when storage cache is enabled.
--enable_vertex_pool=false
# Vertex pool size in MB
--vertex_pool_capacity=50
# TTL in seconds for vertex items in the cache
--vertex_item_ttl=300
# Whether to add empty key pool in cache. Only valid when storage cache is enabled.
--enable_empty_key_pool=false
# Empty key pool size in MB
--empty_key_pool_capacity=50
# TTL in seconds for empty key items in the cache
--empty_key_item_ttl=300
############### misc ####################
--snapshot_part_rate_limit=8388608
--snapshot_part_rate_limit=10485760
--snapshot_batch_size=1048576
--rebuild_index_part_rate_limit=4194304
--rebuild_index_batch_size=1048576
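The sizing guidance in the storage-cache section is consistent with the shipped defaults. Assuming on the order of 650k cache entries (an illustrative number, not from the commit):

$$\left\lceil \log_2(650{,}000 \times 1.6) \right\rceil = \left\lceil \log_2(1{,}040{,}000) \right\rceil = \lceil 19.99 \rceil = 20$$

which matches --storage_cache_buckets_power=20, and max(1, 20 - 10) = 10 matches --storage_cache_locks_power=10. Likewise the snapshot rate-limit bump decodes to 8 MiB/s → 10 MiB/s (8388608 → 10485760 bytes).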
36 changes: 36 additions & 0 deletions apis/apps/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

10 changes: 10 additions & 0 deletions config/crd/bases/apps.nebula-graph.io_nebulaclusters.yaml
@@ -5662,6 +5662,16 @@ spec:
type: object
storaged:
properties:
hostsAdded:
type: boolean
lastBalanceJob:
properties:
jobID:
format: int32
type: integer
space:
type: string
type: object
phase:
type: string
version: