From d957f0bc9a197dc58690d78e3194c8532ec25c09 Mon Sep 17 00:00:00 2001
From: rawmind0
Date: Fri, 18 May 2018 15:56:00 +0200
Subject: [PATCH] Updated es-cluster package to work on rancher v1.6.15 and
 higher

---
 templates/es-cluster/0/rancher-compose.yml    |   1 +
 templates/es-cluster/1/rancher-compose.yml    |   1 +
 templates/es-cluster/2/rancher-compose.yml    |   1 +
 templates/es-cluster/3/rancher-compose.yml    |   1 +
 templates/es-cluster/4/rancher-compose.yml    |   1 +
 templates/es-cluster/5/README.md              |   5 +
 templates/es-cluster/5/docker-compose.yml.tpl | 152 ++++++++++++++++++
 templates/es-cluster/5/rancher-compose.yml    | 111 +++++++++++++
 templates/es-cluster/6/README.md              |   5 +
 templates/es-cluster/6/docker-compose.yml.tpl | 152 ++++++++++++++++++
 templates/es-cluster/6/rancher-compose.yml    | 111 +++++++++++++
 templates/es-cluster/config.yml               |   2 +-
 12 files changed, 542 insertions(+), 1 deletion(-)
 create mode 100644 templates/es-cluster/5/README.md
 create mode 100644 templates/es-cluster/5/docker-compose.yml.tpl
 create mode 100644 templates/es-cluster/5/rancher-compose.yml
 create mode 100644 templates/es-cluster/6/README.md
 create mode 100644 templates/es-cluster/6/docker-compose.yml.tpl
 create mode 100644 templates/es-cluster/6/rancher-compose.yml

diff --git a/templates/es-cluster/0/rancher-compose.yml b/templates/es-cluster/0/rancher-compose.yml
index e5580a17e..e3f36fd02 100644
--- a/templates/es-cluster/0/rancher-compose.yml
+++ b/templates/es-cluster/0/rancher-compose.yml
@@ -2,6 +2,7 @@
   name: Elasticsearch Cluster
   version: 5.4.0-rancher1
   description: Scalable Elasticsearch Cluster
+  maximum_rancher_version: v1.6.14
 
   questions:
   - variable: "cluster_name"
diff --git a/templates/es-cluster/1/rancher-compose.yml b/templates/es-cluster/1/rancher-compose.yml
index 632407f22..8f0520add 100644
--- a/templates/es-cluster/1/rancher-compose.yml
+++ b/templates/es-cluster/1/rancher-compose.yml
@@ -2,6 +2,7 @@
   name: Elasticsearch Cluster
   version: 5.4.2-rancher1
   description: Scalable Elasticsearch Cluster
+  maximum_rancher_version: v1.6.14
 
   questions:
   - variable: "cluster_name"
diff --git a/templates/es-cluster/2/rancher-compose.yml b/templates/es-cluster/2/rancher-compose.yml
index aefd04947..4bd90c349 100644
--- a/templates/es-cluster/2/rancher-compose.yml
+++ b/templates/es-cluster/2/rancher-compose.yml
@@ -3,6 +3,7 @@ catalog:
   name: Elasticsearch Cluster
   version: 5.4.2-rancher2
   description: Scalable Elasticsearch Cluster
+  maximum_rancher_version: v1.6.14
 
   questions:
   - variable: "cluster_name"
diff --git a/templates/es-cluster/3/rancher-compose.yml b/templates/es-cluster/3/rancher-compose.yml
index b39638b62..6cb9efe35 100644
--- a/templates/es-cluster/3/rancher-compose.yml
+++ b/templates/es-cluster/3/rancher-compose.yml
@@ -3,6 +3,7 @@ catalog:
   name: Elasticsearch Cluster
   version: 5.5.1-rancher1
   description: Scalable Elasticsearch Cluster
+  maximum_rancher_version: v1.6.14
 
   questions:
   - variable: "cluster_name"
diff --git a/templates/es-cluster/4/rancher-compose.yml b/templates/es-cluster/4/rancher-compose.yml
index 287c03514..edf2da98f 100644
--- a/templates/es-cluster/4/rancher-compose.yml
+++ b/templates/es-cluster/4/rancher-compose.yml
@@ -3,6 +3,7 @@ catalog:
   name: Elasticsearch Cluster
   version: 6.2.3-rancher1
   description: Scalable Elasticsearch Cluster
+  maximum_rancher_version: v1.6.14
 
   questions:
   - variable: "cluster_name"
diff --git a/templates/es-cluster/5/README.md b/templates/es-cluster/5/README.md
new file mode 100644
index 000000000..c6621887a
--- /dev/null
+++ b/templates/es-cluster/5/README.md
@@ -0,0 +1,5 @@
+# Elasticsearch Cluster
+
+A scalable Elasticsearch cluster
+
+WARN: To avoid vm.max_map_count errors, you can set "Update host sysctl" to true. The host param vm.max_map_count will then be updated to 262144 if it is currently lower on your hosts.
diff --git a/templates/es-cluster/5/docker-compose.yml.tpl b/templates/es-cluster/5/docker-compose.yml.tpl
new file mode 100644
index 000000000..036d172d6
--- /dev/null
+++ b/templates/es-cluster/5/docker-compose.yml.tpl
@@ -0,0 +1,152 @@
+version: '2'
+services:
+  es-master:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-master-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:5.5.1
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "ES_JAVA_OPTS=-Xms${master_heap_size} -Xmx${master_heap_size}"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "discovery.zen.minimum_master_nodes=${minimum_master_nodes}"
+      - "node.master=true"
+      - "node.data=false"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${master_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-master-storage
+
+  es-master-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-master-volume:/usr/share/elasticsearch/data
+
+  es-data:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-data-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:5.5.1
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "ES_JAVA_OPTS=-Xms${data_heap_size} -Xmx${data_heap_size}"
+      - "node.master=false"
+      - "node.data=true"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${data_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-data-storage
+    depends_on:
+      - es-master
+
+  es-data-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-data-volume:/usr/share/elasticsearch/data
+
+  es-client:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-client-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:5.5.1
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "ES_JAVA_OPTS=-Xms${client_heap_size} -Xmx${client_heap_size}"
+      - "node.master=false"
+      - "node.data=false"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${client_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-client-storage
+    depends_on:
+      - es-master
+
+  es-client-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-client-volume:/usr/share/elasticsearch/data
+
+  {{- if eq .Values.UPDATE_SYSCTL "true" }}
+  es-sysctl:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-sysctl:0.1
+    privileged: true
+    environment:
+      - "SYSCTL_KEY=vm.max_map_count"
+      - "SYSCTL_VALUE=262144"
+  {{- end}}
+
+volumes:
+  es-master-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
+  es-data-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
+  es-client-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
diff --git a/templates/es-cluster/5/rancher-compose.yml b/templates/es-cluster/5/rancher-compose.yml
new file mode 100644
index 000000000..b63f11404
--- /dev/null
+++ b/templates/es-cluster/5/rancher-compose.yml
@@ -0,0 +1,111 @@
+version: '2'
+catalog:
+  name: Elasticsearch Cluster
+  version: 5.5.1-rancher2
+  description: Scalable Elasticsearch Cluster
+
+  questions:
+  - variable: "cluster_name"
+    type: "string"
+    required: true
+    label: "Cluster name"
+    description: "Name of the Elasticsearch Cluster"
+    default: "es-cluster"
+
+  - variable: "UPDATE_SYSCTL"
+    label: "Update host sysctl:"
+    description: |
+      Set to true to avoid vm.max_map_count errors.
+      WARN: If set to true, the host param vm.max_map_count will be updated to 262144.
+    default: false
+    required: true
+    type: enum
+    options:
+      - false
+      - true
+
+  - variable: "master_heap_size"
+    type: "string"
+    required: true
+    label: "Heap size (master nodes)"
+    description: "Heap size to be allocated for Java (master nodes)"
+    default: "512m"
+
+  - variable: "master_mem_limit"
+    type: "int"
+    required: true
+    label: "Memory limit in bytes (master nodes)"
+    description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (master nodes)"
+    default: 1073741824
+
+  - variable: "data_heap_size"
+    type: "string"
+    required: true
+    label: "Heap size (data nodes)"
+    description: "Heap size to be allocated for Java (data nodes)"
+    default: "512m"
+
+  - variable: "data_mem_limit"
+    type: "int"
+    required: true
+    label: "Memory limit in bytes (data nodes)"
+    description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (data nodes)"
+    default: 1073741824
+
+  - variable: "client_heap_size"
+    type: "string"
+    required: true
+    label: "Heap size (client nodes)"
+    description: "Heap size to be allocated for Java (client nodes)"
+    default: "512m"
+
+  - variable: "client_mem_limit"
+    type: "int"
+    required: true
+    label: "Memory limit in bytes (client nodes)"
+    description: "Memory limit in bytes per Elasticsearch container. AT LEAST double the heap size! (client nodes)"
+    default: 1073741824
+
+  - variable: "minimum_master_nodes"
+    type: "int"
+    required: true
+    label: "# of minimum Master Nodes"
+    description: "Set the number of required master nodes to reach quorum. Sets the initial scale to this value as well."
+    default: 3
+
+  - variable: "initial_data_nodes"
+    type: "int"
+    required: true
+    label: "# of initial data nodes"
+    description: "Set the initial number of data nodes"
+    default: 2
+
+  - variable: "initial_client_nodes"
+    type: "int"
+    required: true
+    label: "# of initial client nodes"
+    description: "Set the initial number of client nodes"
+    default: 1
+
+  - variable: "VOLUME_DRIVER"
+    description: "The volume driver to associate with this service"
+    label: "VOLUME Driver"
+    required: true
+    default: "local"
+    type: enum
+    options:
+      - local
+      - rancher-nfs
+      - rancher-efs
+      - rancher-ebs
+
+services:
+
+  es-master:
+    scale: ${minimum_master_nodes}
+
+  es-data:
+    scale: ${initial_data_nodes}
+
+  es-client:
+    scale: ${initial_client_nodes}
diff --git a/templates/es-cluster/6/README.md b/templates/es-cluster/6/README.md
new file mode 100644
index 000000000..c6621887a
--- /dev/null
+++ b/templates/es-cluster/6/README.md
@@ -0,0 +1,5 @@
+# Elasticsearch Cluster
+
+A scalable Elasticsearch cluster
+
+WARN: To avoid vm.max_map_count errors, you can set "Update host sysctl" to true. The host param vm.max_map_count will then be updated to 262144 if it is currently lower on your hosts.
diff --git a/templates/es-cluster/6/docker-compose.yml.tpl b/templates/es-cluster/6/docker-compose.yml.tpl
new file mode 100644
index 000000000..d48f0b639
--- /dev/null
+++ b/templates/es-cluster/6/docker-compose.yml.tpl
@@ -0,0 +1,152 @@
+version: '2'
+services:
+  es-master:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-master-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "ES_JAVA_OPTS=-Xms${master_heap_size} -Xmx${master_heap_size}"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "discovery.zen.minimum_master_nodes=${minimum_master_nodes}"
+      - "node.master=true"
+      - "node.data=false"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${master_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-master-storage
+
+  es-master-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-master-volume:/usr/share/elasticsearch/data
+
+  es-data:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-data-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "ES_JAVA_OPTS=-Xms${data_heap_size} -Xmx${data_heap_size}"
+      - "node.master=false"
+      - "node.data=true"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${data_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-data-storage
+    depends_on:
+      - es-master
+
+  es-data-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-data-volume:/usr/share/elasticsearch/data
+
+  es-client:
+    labels:
+      io.rancher.scheduler.affinity:container_label_soft_ne: io.rancher.stack_service.name=$${stack_name}/$${service_name}
+      io.rancher.container.hostname_override: container_name
+      io.rancher.sidekicks: es-client-storage{{- if eq .Values.UPDATE_SYSCTL "true" -}},es-sysctl{{- end}}
+    image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3
+    environment:
+      - "cluster.name=${cluster_name}"
+      - "node.name=$${HOSTNAME}"
+      - "bootstrap.memory_lock=true"
+      - "xpack.security.enabled=false"
+      - "discovery.zen.ping.unicast.hosts=es-master"
+      - "ES_JAVA_OPTS=-Xms${client_heap_size} -Xmx${client_heap_size}"
+      - "node.master=false"
+      - "node.data=false"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    mem_limit: ${client_mem_limit}
+    mem_swappiness: 0
+    cap_add:
+      - IPC_LOCK
+    volumes_from:
+      - es-client-storage
+    depends_on:
+      - es-master
+
+  es-client-storage:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-volume:0.0.2-1
+    environment:
+      - SERVICE_UID=1000
+      - SERVICE_GID=1000
+      - SERVICE_VOLUME=/usr/share/elasticsearch/data
+    volumes:
+      - es-client-volume:/usr/share/elasticsearch/data
+
+  {{- if eq .Values.UPDATE_SYSCTL "true" }}
+  es-sysctl:
+    labels:
+      io.rancher.container.start_once: true
+    network_mode: none
+    image: rawmind/alpine-sysctl:0.1
+    privileged: true
+    environment:
+      - "SYSCTL_KEY=vm.max_map_count"
+      - "SYSCTL_VALUE=262144"
+  {{- end}}
+
+volumes:
+  es-master-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
+  es-data-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
+  es-client-volume:
+    driver: ${VOLUME_DRIVER}
+    per_container: true
diff --git a/templates/es-cluster/6/rancher-compose.yml b/templates/es-cluster/6/rancher-compose.yml
new file mode 100644
index 000000000..975a0dac3
--- /dev/null
+++ b/templates/es-cluster/6/rancher-compose.yml
@@ -0,0 +1,111 @@
+version: '2'
+catalog:
+  name: Elasticsearch Cluster
+  version: 6.2.3-rancher2
+  description: Scalable Elasticsearch Cluster
+
+  questions:
+  - variable: "cluster_name"
+    type: "string"
+    required: true
+    label: "Cluster name"
+    description: "Name of the Elasticsearch Cluster"
+    default: "es-cluster"
+
+  - variable: "UPDATE_SYSCTL"
+    label: "Update host sysctl:"
+    description: |
+      Set to true to avoid vm.max_map_count errors.
+      WARN: If set to true, the host param vm.max_map_count will be updated to 262144.
+    default: false
+    required: true
+    type: enum
+    options:
+      - false
+      - true
+
+  - variable: "master_heap_size"
+    type: "string"
+    required: true
+    label: "Heap size (master nodes)"
+    description: "Heap size to be allocated for Java (master nodes)"
+    default: "512m"
+
+  - variable: "master_mem_limit"
+    type: "int"
+    required: true
+    label: "Memory limit in bytes (master nodes)"
(master nodes)" + default: 1073741824 + + - variable: "data_heap_size" + type: "string" + required: true + label: "Heap size (data nodes)" + description: "Heap size to be allocated for Java (data nodes)" + default: "512m" + + - variable: "data_mem_limit" + type: "int" + required: true + label: "Memory limit in byte (data nodes)" + description: "Memory limit in Byte per elasticsearch container. AT LEAST double the heap size! (data nodes)" + default: 1073741824 + + - variable: "client_heap_size" + type: "string" + required: true + label: "Heap size (client nodes)" + description: "Heap size to be allocated for Java (client nodes)" + default: "512m" + + - variable: "client_mem_limit" + type: "int" + required: true + label: "Memory limit in byte (client nodes)" + description: "Memory limit in Byte per elasticsearch container. AT LEAST double the heap size! (client nodes)" + default: 1073741824 + + - variable: "minimum_master_nodes" + type: "int" + required: true + label: "# of minimum Master Nodes" + description: "Set the number of required master nodes to reach quorum. Sets initial scale to this value as well" + default: 3 + + - variable: "initial_data_nodes" + type: "int" + required: true + label: "# of initial data nodes" + description: "Set the initial number of data nodes" + default: 2 + + - variable: "initial_client_nodes" + type: "int" + required: true + label: "# of initial client nodes" + description: "Set the initial number of client nodes" + default: 1 + + - variable: "VOLUME_DRIVER" + description: "The VOLUME driver to associate with this server" + label: "VOLUME Driver" + required: true + default: "local" + type: enum + options: + - local + - rancher-nfs + - rancher-efs + - rancher-ebs + +services: + + es-master: + scale: ${minimum_master_nodes} + + es-data: + scale: ${initial_data_nodes} + + es-client: + scale: ${initial_client_nodes} diff --git a/templates/es-cluster/config.yml b/templates/es-cluster/config.yml index ce6d65425..c016ef38a 100644 --- a/templates/es-cluster/config.yml +++ b/templates/es-cluster/config.yml @@ -1,5 +1,5 @@ name: Elasticsearch Cluster 6.2.3 description: | Elasticsearch, you know for search! -version: 6.2.3-rancher1 +version: 6.2.3-rancher2 category: ELK