From 0d501353d2f046188dc53b86ae8a119233d112e9 Mon Sep 17 00:00:00 2001 From: Sylvere Richard Date: Fri, 11 May 2018 20:46:47 +0200 Subject: [PATCH] update of swarm example --- build | 2 +- examples/docker/.env | 19 - examples/docker/.gitignore | 1 - examples/docker/Dockerfile-Keeper | 17 - examples/docker/Dockerfile-Proxy | 13 - examples/docker/Dockerfile-Sentinel | 13 - examples/docker/Makefile | 66 --- examples/docker/README.md | 506 ------------------ examples/docker/etc/init-spec.json | 3 - examples/docker/etc/keeper-entrypoint.sh | 9 - examples/docker/local-clean.sh | 24 - examples/docker/local-up.sh | 100 ---- examples/docker/swarm/build.sh | 11 - examples/docker/swarm/clean.sh | 12 - examples/docker/swarm/droplet.sh | 54 -- examples/docker/swarm/init.sh | 18 - examples/docker/swarm/local.sh | 10 - examples/docker/swarm/stolon.sh | 108 ---- examples/swarm/README.md | 212 ++++++++ .../docker-compose-etcd.yml} | 98 ++-- examples/swarm/docker-compose-pg.yml | 90 ++++ examples/swarm/etc/secrets/pgsql | 1 + examples/swarm/etc/secrets/pgsql_repl | 1 + 23 files changed, 345 insertions(+), 1043 deletions(-) delete mode 100644 examples/docker/.env delete mode 100644 examples/docker/.gitignore delete mode 100644 examples/docker/Dockerfile-Keeper delete mode 100644 examples/docker/Dockerfile-Proxy delete mode 100644 examples/docker/Dockerfile-Sentinel delete mode 100644 examples/docker/Makefile delete mode 100644 examples/docker/README.md delete mode 100644 examples/docker/etc/init-spec.json delete mode 100755 examples/docker/etc/keeper-entrypoint.sh delete mode 100755 examples/docker/local-clean.sh delete mode 100755 examples/docker/local-up.sh delete mode 100755 examples/docker/swarm/build.sh delete mode 100755 examples/docker/swarm/clean.sh delete mode 100755 examples/docker/swarm/droplet.sh delete mode 100755 examples/docker/swarm/init.sh delete mode 100755 examples/docker/swarm/local.sh delete mode 100755 examples/docker/swarm/stolon.sh create mode 100644 examples/swarm/README.md rename examples/{docker/docker-compose.yml => swarm/docker-compose-etcd.yml} (51%) create mode 100644 examples/swarm/docker-compose-pg.yml create mode 100644 examples/swarm/etc/secrets/pgsql create mode 100644 examples/swarm/etc/secrets/pgsql_repl diff --git a/build b/build index 71f20b04e..6b6140651 100755 --- a/build +++ b/build @@ -98,7 +98,7 @@ cp ${GOPATH}/bin/keeper ${BINDIR}/stolon-keeper # Copy binaries to Dockerfile image directories declare -a DOCKERFILE_PATHS -DOCKERFILE_PATHS=(${BASEDIR}/examples/kubernetes/image/docker ${BASEDIR}/examples/docker) +DOCKERFILE_PATHS=(${BASEDIR}/examples/kubernetes/image/docker) for path in "${DOCKERFILE_PATHS[@]}" do rm -rf $path/bin/ diff --git a/examples/docker/.env b/examples/docker/.env deleted file mode 100644 index 82443999e..000000000 --- a/examples/docker/.env +++ /dev/null @@ -1,19 +0,0 @@ -COMPOSE_PROJECT_NAME=stolon - -ETCD_VERSION=v3.0.15 - -STOLON_PROXY_PORT=25432 -STOLON_KEEPER_PG_SU_PASSWORDFILE=/etc/stolon/secrets/pgsql -STOLON_KEEPER_PG_REPL_PASSWORDFILE=/etc/stolon/secrets/pgsql-repl - -IMAGE_TAG_SENTINEL=sorintlab/stolon-sentinel:0.5.0 -IMAGE_TAG_KEEPER=sorintlab/stolon-keeper:0.5.0 -IMAGE_TAG_PROXY=sorintlab/stolon-proxy:0.5.0 - -SWARM_MANAGER=swarm-manager -SWARM_WORKER_00=swarm-worker-00 -SWARM_WORKER_01=swarm-worker-01 -SWARM_WORKER_02=swarm-worker-02 - -DO_REGION=sfo1 -DO_SIZE=2GB diff --git a/examples/docker/.gitignore b/examples/docker/.gitignore deleted file mode 100644 index db2fc0de6..000000000 --- a/examples/docker/.gitignore +++ /dev/null 
@@ -1 +0,0 @@ -secrets diff --git a/examples/docker/Dockerfile-Keeper b/examples/docker/Dockerfile-Keeper deleted file mode 100644 index bf2f1e3ce..000000000 --- a/examples/docker/Dockerfile-Keeper +++ /dev/null @@ -1,17 +0,0 @@ -FROM postgres:9.6.1 - -ENV STKEEPER_CLUSTER_NAME=stolon-cluster \ - STKEEPER_STORE_BACKEND=etcdv3 \ - STKEEPER_STORE_ENDPOINTS=http://localhost:2379 \ - STKEEPER_DATA_DIR=/data/postgres - -RUN mkdir -p ${STKEEPER_DATA_DIR} && \ - chmod 700 ${STKEEPER_DATA_DIR} && \ - chown postgres -R ${STKEEPER_DATA_DIR} -VOLUME ["${STKEEPER_DATA_DIR}"] - -COPY etc/keeper-entrypoint.sh /usr/local/bin/ -COPY bin/stolon-keeper bin/stolonctl /usr/local/bin/ - -USER postgres -ENTRYPOINT ["keeper-entrypoint.sh"] diff --git a/examples/docker/Dockerfile-Proxy b/examples/docker/Dockerfile-Proxy deleted file mode 100644 index 17efbab56..000000000 --- a/examples/docker/Dockerfile-Proxy +++ /dev/null @@ -1,13 +0,0 @@ -FROM postgres:9.6.1 - -ENV STPROXY_CLUSTER_NAME=stolon-cluster \ - STPROXY_STORE_BACKEND=etcdv3 \ - STPROXY_STORE_ENDPOINTS=http://localhost:2379 \ - STPROXY_LISTEN_ADDRESS=0.0.0.0 \ - STPROXY_PORT=25432 - -COPY bin/stolon-proxy bin/stolonctl /usr/local/bin/ -RUN chmod +x /usr/local/bin/stolon-proxy /usr/local/bin/stolonctl - -USER postgres -ENTRYPOINT ["stolon-proxy"] diff --git a/examples/docker/Dockerfile-Sentinel b/examples/docker/Dockerfile-Sentinel deleted file mode 100644 index 53ad59437..000000000 --- a/examples/docker/Dockerfile-Sentinel +++ /dev/null @@ -1,13 +0,0 @@ -FROM postgres:9.6.1 - -ENV STSENTINEL_CLUSTER_NAME=stolon-cluster \ - STSENTINEL_STORE_BACKEND=etcdv3 \ - STSENTINEL_STORE_ENDPOINTS=http://localhost:2379 \ - STSENTINEL_INITIAL_CLUSTER_SPEC=/etc/stolon/init-spec.json - -COPY bin/stolon-sentinel bin/stolonctl /usr/local/bin/ -COPY etc/init-spec.json ${STSENTINEL_INITIAL_CLUSTER_SPEC} -RUN chmod +x /usr/local/bin/stolon-sentinel /usr/local/bin/stolonctl - -USER postgres -ENTRYPOINT ["stolon-sentinel"] diff --git a/examples/docker/Makefile b/examples/docker/Makefile deleted file mode 100644 index 9f4906212..000000000 --- a/examples/docker/Makefile +++ /dev/null @@ -1,66 +0,0 @@ -.PHONY: build secrets local-up local-clean - -include .env - -export ETCD_VERSION ETCD_TOKEN IMAGE_TAG_SENTINEL IMAGE_TAG_KEEPER IMAGE_TAG_PROXY STOLON_PROXY_PORT STOLON_KEEPER_PG_SU_PASSWORDFILE STOLON_KEEPER_PG_REPL_PASSWORDFILE SWARM_MANAGER SWARM_WORKER_00 SWARM_WORKER_01 SWARM_WORKER_02 DO_REGION DO_SIZE - -build: secrets sentinel keeper proxy - -sentinel: - docker build --rm -t ${IMAGE_TAG_SENTINEL} -f Dockerfile-Sentinel . - -keeper: - docker build --rm -t ${IMAGE_TAG_KEEPER} -f Dockerfile-Keeper . - -proxy: - docker build --rm -t ${IMAGE_TAG_PROXY} -f Dockerfile-Proxy . 
- -secrets: - rm -rf etc/secrets - mkdir -p etc/secrets - cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 >> etc/secrets/pgsql - cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 >> etc/secrets/pgsql-repl - -local-up: - @test -n "${ETCD_TOKEN}" # Please provide a value for $${ETCD_TOKEN} - ./local-up.sh - -local-clean: - ./local-clean.sh - -local-purge: - PURGE_VOLUMES=true ./local-clean.sh - -compose-up: - @test -n "${ETCD_TOKEN}" # Please provide a value for $${ETCD_TOKEN} - docker-compose up -d --remove-orphans - -compose-down: - docker-compose down --remove-orphans - -compose-purge: - docker-compose down -v --remove-orphans - -swarm-local: - swarm/local.sh - -swarm-droplets: - @test -n "${DO_ACCESS_TOKEN}" # Please provide a value for $${DO_ACCESS_TOKEN} - @test -n "${DO_SSH_KEY_FINGERPRINT}" # Please provide a value for $${DO_SSH_KEY_FINGERPRINT} - swarm/droplet.sh - -swarm-init: - swarm/init.sh - -swarm-build: secrets - swarm/build.sh - -swarm-stolon: - @test -n "${ETCD_TOKEN}" # Please provide a value for $${ETCD_TOKEN} - swarm/stolon.sh - -swarm-clean: - swarm/clean.sh - -swarm-destroy: - DESTROY_MACHINES=true swarm/clean.sh diff --git a/examples/docker/README.md b/examples/docker/README.md deleted file mode 100644 index 95d6e95f3..000000000 --- a/examples/docker/README.md +++ /dev/null @@ -1,506 +0,0 @@ -# Dockerized Stolon -Here are some examples on running a Dockerized Stolon cluster. - -All examples are tested with Docker 1.12.5, Docker Compose 1.9.0 and Stolon 0.5.0. - -## Table of Content - -* [Docker Images](#docker-images) -* [Local Cluster](#local-cluster) -* [Docker Compose](#docker-compose) -* [Docker Swarm](#docker-swarm) - -## Docker Images -The following Dockerfiles can be used to build the Docker images of Stolon Sentinel, Keeper and Proxy: - -1. Dockerfile-Sentinel -1. Dockerfile-Keeper -1. Dockerfile-Proxy - -The `etc/init-spec.json` file provides a minimal default Stolon cluster specification. This file can be modified to change the cluster's initial specification. To build the Keeper images, two secret files must be provided at `etc/secrets/pgsql` and `etc/secrets/pgsql-repl`. The content of the `secrets` folder is git-ignored. - -A convenient `build` target is provided in the Makefile to build all the components' image and generate the needed secrets. - -## Local Cluster -This example sets up a Stolon cluster of 1 Sentinel instance, 3 Keeper instances, 1 Proxy instance and 3 etcd instances in your local environment. All containers are connected to a user-defined bridge network, named `stolon-network`. - -To get started, build the Docker images with: -```sh -$ make build -``` - -To set up the local cluster, run: -```sh -$ ETCD_TOKEN= make local-up -``` -The Proxy's port is mapped to a random higher range host port, which can be obtained using the `docker port stolon-proxy` command. - -To make sure the etcd cluster is running correctly: -```sh -$ docker exec etcd-00 etcdctl cluster-health -member 7883f95c8b8e92b is healthy: got healthy result from http://etcd-00:2379 -member 7da9f70288fafe07 is healthy: got healthy result from http://etcd-01:2379 -member 93b4b1aeeb764068 is healthy: got healthy result from http://etcd-02:2379 -cluster is healthy -``` - -To make sure you can connect to the stolon cluster: -```sh -$ docker port stolon-proxy -25432/tcp -> 0.0.0.0:32786 - -$ psql -h 127.0.0.1 -p 32786 -U postgres -Password for user postgres: # can be obtained from your local secrets/pgsql -psql (9.6.1) -Type "help" for help. 
- -postgres=# -``` - -Now you can run some SQL query tests against the cluster: -```sh -postgres=# CREATE TABLE test (id INT PRIMARY KEY NOT NULL, value TEXT NOT NULL); -CREATE TABLE -postgres=# INSERT INTO test VALUES (1, 'value1'); -INSERT 0 1 -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To make sure that the replication is working correctly, use the `docker stop` command to kill the master keeper. The `docker logs stolon-sentinel` command can be used to determine the master keeper's ID, and monitor the failover process. - -For example, -```sh -$ docker logs stolon-sentinel -.... -[I] 2016-12-20T02:16:26Z sentinel.go:1408: sentinel uid uid=0bf65dce -[I] 2016-12-20T02:16:26Z sentinel.go:84: Trying to acquire sentinels leadership -[I] 2016-12-20T02:16:26Z sentinel.go:1310: writing initial cluster data -[I] 2016-12-20T02:16:26Z sentinel.go:91: sentinel leadership acquired -[I] 2016-12-20T02:16:31Z sentinel.go:571: trying to find initial master -[E] 2016-12-20T02:16:31Z sentinel.go:1361: failed to update cluster data error=cannot choose initial master: no keepers registered -[I] 2016-12-20T02:16:36Z sentinel.go:571: trying to find initial master -[I] 2016-12-20T02:16:36Z sentinel.go:576: initializing cluster keeper=db0f03a1 # <--- this is the master keeper -[W] 2016-12-20T02:16:41Z sentinel.go:245: received db state for unexpected db uid receivedDB= db=77c1631c -[I] 2016-12-20T02:16:41Z sentinel.go:614: waiting for db db=77c1631c keeper=db0f03a1 -[I] 2016-12-20T02:16:46Z sentinel.go:601: db initialized db=77c1631c keeper=db0f03a1 -[I] 2016-12-20T02:17:01Z sentinel.go:1009: added new standby db db=0d640820 keeper=db402496 -[I] 2016-12-20T02:17:01Z sentinel.go:1009: added new standby db db=38d5f2f3 keeper=5001cc2c -..... -$ docker stop stolon-keeper-00 # your master keeper might be different -$ docker logs -f stolon-sentinel -..... -[E] 2016-12-20T02:59:36Z sentinel.go:234: no keeper info available db=77c1631c keeper=db0f03a1 -[E] 2016-12-20T02:59:41Z sentinel.go:234: no keeper info available db=77c1631c keeper=db0f03a1 -[I] 2016-12-20T02:59:41Z sentinel.go:743: master db is failed db=77c1631c keeper=db0f03a1 -[I] 2016-12-20T02:59:41Z sentinel.go:754: trying to find a new master to replace failed master -[I] 2016-12-20T02:59:41Z sentinel.go:785: electing db as the new master db=0d640820 keeper=db402496 -[E] 2016-12-20T02:59:46Z sentinel.go:234: no keeper info available db=77c1631c keeper=db0f03a1 -[E] 2016-12-20T02:59:51Z sentinel.go:234: no keeper info available db=77c1631c keeper=db0f03a1 -[I] 2016-12-20T02:59:51Z sentinel.go:844: removing old master db db=77c1631c -..... -``` - -Once the failover process is completed, you will be able to resume your `psql` session. -```sh -postgres=# SELECT * FROM test; -server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -The connection to the server was lost. Attempting reset: Succeeded. -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To destroy the entire cluster, run `make local-clean` to stop and remove all etcd and stolon containers. By default, the Keepers' volumes aren't removed. To purge the containers and their respective volumes, use `make local-purge`. - -## Docker Compose -This example sets up a Docker Compose Stolon cluster with 1 Sentinel instance, 3 Keeper instances, 1 Proxy instance and 3 etcd instances on your local environment. 
All containers are connected to a user-defined bridge network, named `stolon-network`. - -To get started, build all the Docker images by running: -```sh -$ make build -``` - -Then run the `compose-up` target in the Makefile: -```sh -$ export ETCD_TOKEN= -$ make compose-up -``` -The `docker-compose.yml` file reads all its required variables from the `.env` file. - -Once `make compose-up` is completed, make sure all the services are running: -```sh -$ docker-compose ps - Name Command State Ports ------------------------------------------------------------------------------------------------ -etcd-00 etcd --name=etcd-00 --data ... Up 2379/tcp, 2380/tcp -etcd-01 etcd --name=etcd-01 --data ... Up 2379/tcp, 2380/tcp -etcd-02 etcd --name=etcd-02 --data ... Up 2379/tcp, 2380/tcp -keeper-00 stolon-keeper --pg-su-user ... Up 5432/tcp -keeper-01 stolon-keeper --pg-su-user ... Up 5432/tcp -keeper-02 stolon-keeper --pg-su-user ... Up 5432/tcp -stolon_proxy_1 stolon-proxy --store-endpo ... Up 0.0.0.0:32794->25432/tcp, 5432/tcp -stolon_sentinel_1 stolon-sentinel --store-en ... Up 5432/tcp -``` - -To make sure the etcd cluster is running: -```sh -$ docker-compose exec etcd-00 etcdctl cluster-health -member 7883f95c8b8e92b is healthy: got healthy result from http://etcd-00:2379 -member 7da9f70288fafe07 is healthy: got healthy result from http://etcd-01:2379 -member 93b4b1aeeb764068 is healthy: got healthy result from http://etcd-02:2379 -cluster is healthy -``` - -To scale the number of Keeper instances: -```sh -$ docker-compose scale keeper=3 -``` - -Notice that Sentinel will detect the new Keeper instances: -```sh -$ docker-compose logs -f sentinel -Attaching to stolon_sentinel_1 -...... -sentinel_1 | [I] 2016-12-25T06:23:29Z sentinel.go:1009: added new standby db db=22c06996 keeper=56fdf72c -sentinel_1 | [I] 2016-12-25T06:23:29Z sentinel.go:1009: added new standby db db=5583be81 keeper=5967f252 -...... -``` - -To make sure you can connect to the Stolon cluster using the Proxy's published port: -```sh -$ docker-compose port proxy 25432 -0.0.0.0:32794 - -$ psql -h 127.0.0.1 -p 32794 -U postgres -Password for user postgres: # can be obtained from your local secrets/pgsql -psql (9.6.1) -Type "help" for help. - -postgres=# -``` - -Now you can run some SQL query tests against the cluster: -```sh -postgres=# CREATE TABLE test (id INT PRIMARY KEY NOT NULL, value TEXT NOT NULL); -CREATE TABLE -postgres=# INSERT INTO test VALUES (1, 'value1'); -INSERT 0 1 -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To make sure that the replication is working correctly, you will have to stop the master Keeper container using `docker stop` since Docker Compose commands only work with Compose services. Note that scaling down the Keeper services using `docker-compose scale` won't work because you won't have control over which replicas to stop. The `docker-compose logs -f sentinel` command can be used to determine the master keeper's ID, and monitor the failover process. - -For example, -```sh -$ docker-compose logs -f sentinel -..... -[I] 2016-12-20T02:16:36Z sentinel.go:576: initializing cluster keeper=db0f03a1 # <--- this is the master keeper -..... -$ docker stop stolon_keeper_1 # your master keeper might be different -$ docker-compose logs -f sentinel -..... 
-sentinel_1 | [I] 2016-12-25T06:40:47Z sentinel.go:743: master db is failed db=0d80aba2 keeper=c7280058 -sentinel_1 | [I] 2016-12-25T06:40:47Z sentinel.go:754: trying to find a new master to replace failed master -sentinel_1 | [I] 2016-12-25T06:40:47Z sentinel.go:785: electing db as the new master db=8b4e0342 keeper=1bfb47ae -sentinel_1 | [E] 2016-12-25T06:40:52Z sentinel.go:234: no keeper info available db=0d80aba2 keeper=c7280058 -sentinel_1 | [I] 2016-12-25T06:40:52Z sentinel.go:844: removing old master db db=0d80aba2 -..... -``` - -Once the failover process is completed, you will be able to resume your `psql` session. -```sh -postgres=# SELECT * FROM test; -server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -The connection to the server was lost. Attempting reset: Succeeded. -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To destroy the entire cluster, run `make compose-clean` to stop and remove all etcd and stolon containers. By default, the Keepers' volumes aren't removed. To purge the containers and their respective volumes, use `make compose-purge`. - -### Known Issues -The Sentinel seems unable to detect removed Keeper instances when the service is scaled down using `docker-compose scale`. To reproduce, - -1. Scale up the Keeper service with `docker-compose scale keeper=3`. -1. Scale down the service with `docker-compose scale keeper=1`. -1. The Sentinel logs show that Sentinel continues to probe the removed replicas. - -## Docker Swarm -This example sets up a Stolon cluster with Docker swarm on either your local environment with Virtualbox + Boot2Docker or DigitalOcean. It uses Docker Machine to set up 1 Swarm Manager and 3 Swarm workers. The Stolon cluster is made up of 1 Sentinel instance, 3 Keeper instances, 1 Proxy instance and 3 etc instances. Note that the droplets created on DigitalOcean aren't free. - -To get started, set up the VMs using either: -```sh -$ make swarm-local -``` -or -```sh -$ DO_ACCESS_TOKEN= DO_SSH_KEY_FINGERPRINT= make swarm-droplets -``` -The DigitalOcean access token and SSH key fingerprint can be generated and obtained from the DigitalOcean web console. - -To confirm that the VMs are running: -```sh -$ docker-machine ls -NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS -swarm-manager - digitalocean Running tcp://xxx.xxx.xxx.xxx:2376 v1.12.5 -swarm-worker-00 - digitalocean Running tcp://xxx.xxx.xxx.xxx:2376 v1.12.5 -swarm-worker-01 - digitalocean Running tcp://xxx.xxx.xxx.xxx:2376 v1.12.5 -swarm-worker-02 - digitalocean Running tcp://xxx.xxx.xxx.xxx:2376 v1.12.5 -``` - -Then you can initialize Docker Swarm with: -```sh -$ make swarm-init -``` -This configures the `swarm-manager` VM to be the Swarm Leader Manager with `swarm-worker-00`, `swarm-worker-01` and `swarm-worker-02` serving as the Swarm Workers. - -To confirm that the Swarm cluster is configured correctly: -```sh -$ eval `docker-machine env swarm-manager` -$ docker node ls -ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS -6ya2x5bmdear0gsgibule19uv swarm-worker-02 Ready Active -8m2ugdyw0rb7dnum9xgeratbi * swarm-manager Ready Drain Leader -9g7vaofjs8eysuax95mghn9sb swarm-worker-01 Ready Active -f55xnxwkjeujjlo5cgcq7uq14 swarm-worker-00 Ready Active -``` -The Swarm Manager is configured to have `Drain` availability implying that no service tasks (i.e. containers) will be scheduled to run on it. - -Once all the nodes (i.e. 
VMs) are live, the Stolon component images must be built on all VMs before any Stolon containers can be run: -```sh -make swarm-build -``` -This target runs the `docker build` command on every node in the Swarm cluster. It will take a few minutes to complete the build. - -To confirm that the build completed successfully: -```sh -$ eval `docker-machine env swarm-manager` -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -sorintlab/stolon-proxy 0.5.0 9edc0d485664 4 hours ago 301 MB -sorintlab/stolon-keeper 0.5.0 9008fdcb765d 4 hours ago 290.1 MB -sorintlab/stolon-sentinel 0.5.0 e7ff51216416 4 hours ago 301.3 MB -postgres 9.6.1 0e24dd8079dc 11 days ago 264.9 MB - -$ eval `docker-machine env swarm-worker-00` -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -sorintlab/stolon-proxy 0.5.0 vsdd5654d854 4 hours ago 301 MB -sorintlab/stolon-keeper 0.5.0 9d4th77dfbsw 4 hours ago 290.1 MB -sorintlab/stolon-sentinel 0.5.0 u8d9cxjs0psf 4 hours ago 301.3 MB -postgres 9.6.1 08s7sdfmvaws 11 days ago 264.9 MB - -$ eval `docker-machine env swarm-worker-01` -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -sorintlab/stolon-proxy 0.5.0 9edc0d485664 4 hours ago 301 MB -sorintlab/stolon-keeper 0.5.0 9008fdcb765d 4 hours ago 290.1 MB -sorintlab/stolon-sentinel 0.5.0 e7ff51216416 4 hours ago 301.3 MB -postgres 9.6.1 0e24dd8079dc 11 days ago 264.9 MB - -$ eval `docker-machine env swarm-worker-02` -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -sorintlab/stolon-proxy 0.5.0 a1bc2beb28b7 4 hours ago 301 MB -sorintlab/stolon-keeper 0.5.0 a50fb056a39a 4 hours ago 290.1 MB -sorintlab/stolon-sentinel 0.5.0 4bdf9eb2b345 4 hours ago 301.3 MB -postgres 9.6.1 -``` - -Once the images are built, you can create the Stolon cluster with: -```sh -$ eval `docker-machine env swarm-manager` -$ ETCD_TOKEN= make swarm-stolon -``` - -To confirm that all the services are running: -```sh -$ eval `docker-machine env swarm-manager` -$ docker service ls -ID NAME REPLICAS IMAGE COMMAND -2ez3ztkbp26l etcd-02 1/1 quay.io/coreos/etcd:v3.0.15 /usr/local/bin/etcd --name=etcd-02 --data-dir=data.etcd --advertise-client-urls=http://etcd-02:2379 --listen-client-urls=http://0.0.0.0:2379 --initial-advertise-peer-urls=http://etcd-02:2380 --listen-peer-urls=http://0.0.0.0:2380 --initial-cluster=,etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 --initial-cluster-state=new --initial-cluster-token=xxxxxx -2qywfq79c0c4 etcd-00 1/1 quay.io/coreos/etcd:v3.0.15 /usr/local/bin/etcd --name=etcd-00 --data-dir=data.etcd --advertise-client-urls=http://etcd-00:2379 --listen-client-urls=http://0.0.0.0:2379 --initial-advertise-peer-urls=http://etcd-00:2380 --listen-peer-urls=http://0.0.0.0:2380 --initial-cluster=,etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 --initial-cluster-state=new --initial-cluster-token=xxxxxx -3cwu5iecxpgn etcd-01 1/1 quay.io/coreos/etcd:v3.0.15 /usr/local/bin/etcd --name=etcd-01 --data-dir=data.etcd --advertise-client-urls=http://etcd-01:2379 --listen-client-urls=http://0.0.0.0:2379 --initial-advertise-peer-urls=http://etcd-01:2380 --listen-peer-urls=http://0.0.0.0:2380 --initial-cluster=,etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 --initial-cluster-state=new --initial-cluster-token=xxxxxx -48usxupkqvc8 keeper 3/3 sorintlab/stolon-keeper:0.5.0 -5c5h3e2i787u sentinel 1/1 sorintlab/stolon-sentinel:0.5.0 -ay815zsum0xd proxy 1/1 sorintlab/stolon-proxy:0.5.0 - -$ docker service ps etcd-00 -ID NAME IMAGE NODE DESIRED 
STATE CURRENT STATE ERROR -2srlhftr4wp7b5tmu9gsjn70x etcd-00.1 quay.io/coreos/etcd:v3.0.15 swarm-worker-01 Running Running 2 minutes ago - -$ docker service ps etcd-01 -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -30lerzeaatfnbhfycqwk2engb etcd-01.1 quay.io/coreos/etcd:v3.0.15 swarm-worker-00 Running Running 2 minutes ago - -$ docker service ps etcd-02 -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -7kgaxnjsmz534baxxvjp32t0c etcd-02.1 quay.io/coreos/etcd:v3.0.15 swarm-worker-02 Running Running 2 minutes ago - -$ docker service ps sentinel -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -95r09ncjyzgmdx501ven7dfnq sentinel.1 sorintlab/stolon-sentinel:0.5.0 swarm-worker-01 Running Running 2 minutes ago - -$ docker service ps keeper -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -dxffdkfs36op4ty09h65pid2f keeper.1 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Running Running 2 minutes ago -6tv7ohrcfvmb0k2d5li99nw9l keeper.2 sorintlab/stolon-keeper:0.5.0 swarm-worker-02 Running Running 2 minutes ago -0590gdetd56bx4prritou1tn2 keeper.3 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Running Running 2 minutes ago - -$ docker service ps proxy -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -cgc914ycdf5pbd4sdinr2wpmt proxy.1 sorintlab/stolon-proxy:0.5.0 swarm-worker-00 Running Running 2 minutes ago -``` - -To make sure the etcd cluster is healthy, determine the node that any of the etcd instance is scheduled on using `docker service ps`, and run `docker exec` against that node. For example: -```sh -# determine which node etcd-00 is on -$ docker service ps etcd-00 -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -2srlhftr4wp7b5tmu9gsjn70x etcd-00.1 quay.io/coreos/etcd:v3.0.15 swarm-worker-01 Running Running 2 minutes ago - -# look for its container on swarm-worker-01 -$ eval `docker-machine env swarm-worker-01` -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -1493ebf68c0f quay.io/coreos/etcd:v3.0.15 "/usr/local/bin/etcd " 5 minutes ago Up 5 minutes 2379-2380/tcp etcd-00.1.2srlhftr4wp7b5tmu9gsjn70x - -# use etcdctl to check cluster health -$ docker exec etcd-00.1.2srlhftr4wp7b5tmu9gsjn70x etcdctl cluster-health -member 7883f95c8b8e92b is healthy: got healthy result from http://etcd-00:2379 -member 7da9f70288fafe07 is healthy: got healthy result from http://etcd-01:2379 -member 93b4b1aeeb764068 is healthy: got healthy result from http://etcd-02:2379 -cluster is healthy -``` - -To scale the number of Keeper instances: -```sh -$ eval `docker-machine env swarm-manager` -$ docker service scale keeper=4 -$ docker service ps keeper -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -27v9yj0h1cwdojjaty05scwfk keeper.1 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Running Running 5 minutes ago -8wji81wrsrflfkbs59ne8m9n7 \_ keeper.1 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Shutdown Shutdown 5 minutes ago -azt0qs5dl2rtnoyqr36zpfdbl keeper.2 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Running Running 5 minutes ago -6pcxbnuwcesdhws3dz7k8poo8 \_ keeper.2 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Shutdown Shutdown 5 minutes ago -2g331ue8bgdqtxb02xbsw3wgk \_ keeper.2 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Shutdown Shutdown 5 minutes ago -1osn0uj153vvkw6xpxbp4x858 keeper.3 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Running Running 5 minutes ago -1y05i7ilhbnwmgods78nfyk28 \_ keeper.3 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Shutdown Shutdown 5 minutes ago -du8t6w8c75rhdjidtnw8e2186 keeper.4 sorintlab/stolon-keeper:0.5.0 
swarm-worker-01 Running Running about a minute ago -6k7skt591htszhc7qxmr9izf0 keeper.5 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Running Running about a minute ago -1c62gdxih05lkgcjs8g8oc0bc keeper.6 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Running Running about a minute ago -``` -Check the Sentinel's logs to see the new Keeper joins the cluster. - -To make sure that you can access the Stolon cluster, -```sh -$ docker service inspect --pretty proxy -.... -Ports: - Protocol = tcp - TargetPort = 25432 - PublishedPort = 30000 -$ psql -U postgres -h -p 30000 -Password for user postgres: # can be obtained from your etc/secrets/pgsql -psql (9.6.1) -Type "help" for help. - -postgres=# -``` - -Now you can run some SQL query tests against the cluster: -```sh -postgres=# CREATE TABLE test (id INT PRIMARY KEY NOT NULL, value TEXT NOT NULL); -CREATE TABLE -postgres=# INSERT INTO test VALUES (1, 'value1'); -INSERT 0 1 -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To make sure that the replication is working correctly, stop the master Keeper container using `docker stop`. The `docker logs -f sentinel` command can be used to determine the master keeper's ID, and monitor the failover process. - -For example, -```sh -# find the node the Sentinel is on -$ eval `docker-machine env swarm-manager` -$ docker service ps sentinel -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -95r09ncjyzgmdx501ven7dfnq sentinel.1 sorintlab/stolon-sentinel:0.5.0 swarm-worker-01 Running Running 11 minutes ago - -# use the Sentinel's log to determine the master Keeper -$ eval `docker-machine env swarm-worker-01` -$ docker logs -f sentinel.1.95r09ncjyzgmdx501ven7dfnq -..... -[I] 2016-12-26T04:20:44Z sentinel.go:576: initializing cluster keeper=76a713c4 # <----- This is the master keeper -..... - -# assume that the master Keeper is container keeper.1.dxffdkfs36op4ty09h65pid2f on swarm-worker-02... -$ eval `docker-machine env swarm-worker-02` -$ docker service ps keeper -ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR -dxffdkfs36op4ty09h65pid2f keeper.1 sorintlab/stolon-keeper:0.5.0 swarm-worker-01 Running Running 11 minutes ago -6tv7ohrcfvmb0k2d5li99nw9l keeper.2 sorintlab/stolon-keeper:0.5.0 swarm-worker-02 Running Running 11 minutes ago -0590gdetd56bx4prritou1tn2 keeper.3 sorintlab/stolon-keeper:0.5.0 swarm-worker-00 Running Running 11 minutes ago -$ docker logs -f keeper.1.dxffdkfs36op4ty09h65pid2f -.... -[I] 2016-12-26T04:20:55Z keeper.go:1124: our db requested role is master -[I] 2016-12-26T04:20:55Z postgresql.go:191: starting database -[I] 2016-12-26T04:20:56Z keeper.go:1144: already master - -# stop the master Keeper container -$ docker stop keeper.1.dxffdkfs36op4ty09h65pid2f - -# examine Sentinel's log -$ docker logs -f sentinel.1.95r09ncjyzgmdx501ven7dfnq -..... 
-[E] 2016-12-26T04:48:20Z sentinel.go:234: no keeper info available db=d106c3c6 keeper=76a713c4 -[I] 2016-12-26T04:48:20Z sentinel.go:743: master db is failed db=d106c3c6 keeper=76a713c4 -[I] 2016-12-26T04:48:20Z sentinel.go:754: trying to find a new master to replace failed master -[I] 2016-12-26T04:48:20Z sentinel.go:785: electing db as the new master db=3d665ac6 keeper=ef175c41 -[E] 2016-12-26T04:48:25Z sentinel.go:234: no keeper info available db=5fe088f8 keeper=f02709c1 -[E] 2016-12-26T04:48:25Z sentinel.go:234: no keeper info available db=d106c3c6 keeper=76a713c4 -[E] 2016-12-26T04:48:30Z sentinel.go:234: no keeper info available db=5fe088f8 keeper=f02709c1 -[E] 2016-12-26T04:48:30Z sentinel.go:234: no keeper info available db=d106c3c6 keeper=76a713c4 -[I] 2016-12-26T04:48:30Z sentinel.go:844: removing old master db db=d106c3c6 -..... -``` - -Once the failover process is completed, you will be able to resume your `psql` session. -```sh -postgres=# SELECT * FROM test; -server closed the connection unexpectedly - This probably means the server terminated abnormally - before or while processing the request. -The connection to the server was lost. Attempting reset: Succeeded. -postgres=# SELECT * FROM test; - id | value -----+-------- - 1 | value1 -(1 row) -``` - -To remove all the services in the swarm, run `make swarm-clean`. This will stop and remove all etcd and stolon containers. By default, all the nodes aren't removed. To destroy all the nodes, use `make swarm-destroy`. - -### Known Issues -At the time of this writing, there are no ways to view the services' logs directly in Docker 1.12. Refer this issue [here](https://github.com/portainer/portainer/issues/334). The workaround involves using `docker service ps` to determine which node the service task is scheduled to and run `docker logs` on that node. diff --git a/examples/docker/etc/init-spec.json b/examples/docker/etc/init-spec.json deleted file mode 100644 index d9d5272c9..000000000 --- a/examples/docker/etc/init-spec.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "initMode":"new" -} diff --git a/examples/docker/etc/keeper-entrypoint.sh b/examples/docker/etc/keeper-entrypoint.sh deleted file mode 100755 index 60cb33909..000000000 --- a/examples/docker/etc/keeper-entrypoint.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# The command-line options specified in this script are expected NOT to change. -# To override the Keeper's entrypoint, you will have to provide the appropriate values for all the options. In particular, the user assigned to `--pg-su-username` must exist in the container, and `--pg-listen-address` must be unique, discoverable and accessible by other containers and replicas of Keeper. 
-
-stolon-keeper \
-    --pg-su-username=postgres \
-    --pg-repl-username=repluser \
-    --pg-listen-address=$HOSTNAME
diff --git a/examples/docker/local-clean.sh b/examples/docker/local-clean.sh
deleted file mode 100755
index 0a557e55d..000000000
--- a/examples/docker/local-clean.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-if [ "$PURGE_VOLUMES" ]
-then
-  purge_volumes="-v"
-fi
-
-docker rm -f $purge_volumes stolon-sentinel stolon-proxy
-
-declare -a etcd_nodes
-etcd_nodes=(etcd-00 etcd-01 etcd-02)
-for node in "${etcd_nodes[@]}"
-do
-  docker rm -f $purge_volumes $node
-done
-
-declare -a keepers
-keepers=(stolon-keeper-00 stolon-keeper-01 stolon-keeper-02)
-for keeper in "${keepers[@]}"
-do
-  docker rm -f $purge_volumes $keeper
-done
-
-docker network rm stolon-network
diff --git a/examples/docker/local-up.sh b/examples/docker/local-up.sh
deleted file mode 100755
index 31614b071..000000000
--- a/examples/docker/local-up.sh
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/bin/bash
-
-set -e
-
-usage() {
-  cat < :
+```
+
+It's highly recommended to set labels on each node so that we can set deployment constraints.
+You can use the following command to set a `nodename` label equal to `node1` on the first node:
+
+```
+docker node update --label-add nodename=node1 <node>
+```
+
+Repeat this command to set a different label for each node. Allowed values for `<node>` can be found using this command:
+```
+docker node ls
+```
+
+
+### Initialize the etcdv3 backend
+
+Now, before starting any stolon component, we need to start the etcdv3 backend. This can easily be done using the provided `docker-compose-etcd.yml` file:
+
+```
+docker stack deploy --compose-file docker-compose-etcd.yml etcd
+```
+
+You can check that everything is fine using this command:
+
+```
+docker service ls
+ID                  NAME                MODE                REPLICAS            IMAGE                         PORTS
+pbjr4k9285iy        etcd_etcd-00        replicated          1/1                 quay.io/coreos/etcd:v3.2.17   *:2379->2379/tcp
+7bc4d0e0qf2y        etcd_etcd-01        replicated          1/1                 quay.io/coreos/etcd:v3.2.17
+2dcqqmi5li0v        etcd_etcd-02        replicated          1/1                 quay.io/coreos/etcd:v3.2.17
+```
+
+### Initialize the cluster
+
+All the stolon components wait for an existing clusterdata entry in the store. So, the first time, you have to initialize a new cluster. For more details see the [cluster initialization doc](/doc/initialization.md). You can do this step at any moment, now or after having started the stolon components.
+
+You can execute stolonctl from a machine that can access the store backend:
+
+```
+stolonctl --cluster-name=stolon-cluster --store-backend=etcdv3 --store-endpoints http://localhost:2379 init
+```
+
+### Create sentinel(s), keepers and proxy(ies)
+
+To create all the stolon components, we just have to create a new stack based on the file `docker-compose-pg.yml`.
+Before creating the stack, if you have several nodes, it's highly recommended to set placement constraints so that each keeper
+will start on its own node.
+Assuming you have 2 nodes with labels `nodename=node1` and `nodename=node2`, just edit the file `docker-compose-pg.yml`
+and uncomment the lines:
+```
+      placement:
+        constraints: [node.labels.nodename == node1]
+```
+and
+```
+      placement:
+        constraints: [node.labels.nodename == node2]
+```
+
+If you have only one node, just leave the file unchanged.
+Now, enter the following command to create the new stack with all the stolon components:
+
+```
+docker stack deploy --compose-file docker-compose-pg.yml pg
+```
+
+This will create 2 sentinels, 2 proxies and 2 keepers.
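+
+Both secret files under `./etc/secrets` ship with the default value `password1`. If you want different passwords, regenerate the two files and redeploy the stack; a minimal sketch (assuming a Linux shell), adapted from the secret generation the removed Makefile performed:
+
+```
+# overwrite the default 'password1' secrets with 32-character random values
+cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 > etc/secrets/pgsql
+cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1 > etc/secrets/pgsql_repl
+```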
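+
+You can also ask stolon itself for its view of the cluster; a sketch, assuming `stolonctl` is run from a machine that can reach the published etcd port, with the same flags as the init step above:
+
+```
+# same store flags as the 'init' command above; prints the cluster status
+stolonctl --cluster-name=stolon-cluster --store-backend=etcdv3 --store-endpoints http://localhost:2379 status
+```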
+You can check that all the services are running using this command:
+
+```
+docker service ls
+ID                  NAME                MODE                REPLICAS            IMAGE                            PORTS
+pbjr4k9285iy        etcd_etcd-00        replicated          1/1                 quay.io/coreos/etcd:v3.2.17      *:2379->2379/tcp
+7bc4d0e0qf2y        etcd_etcd-01        replicated          1/1                 quay.io/coreos/etcd:v3.2.17
+2dcqqmi5li0v        etcd_etcd-02        replicated          1/1                 quay.io/coreos/etcd:v3.2.17
+k2vh3ff0acpg        pg_keeper1          replicated          1/1                 sorintlab/master-pg10:latest
+jbm195xsalwu        pg_keeper2          replicated          1/1                 sorintlab/master-pg10:latest
+sf79wnygtcmu        pg_proxy            replicated          2/2                 sorintlab/master-pg10:latest     *:5432->5432/tcp
+tkbnrfdj4axa        pg_sentinel         replicated          2/2                 sorintlab/master-pg10:latest
+```
+
+### Connect to the db
+
+#### Connect to the proxy service
+
+The password for the `postgres` superuser is the value specified in your `./etc/secrets/pgsql` file (or `password1` if you did not change it).
+
+```
+psql --host localhost --port 5432 postgres -U postgres -W
+Password for user postgres:
+psql (10.3, server 10.3)
+Type "help" for help.
+
+postgres=#
+```
+
+### Create a test table and insert a row
+
+```
+postgres=# create table test (id int primary key not null, value text not null);
+CREATE TABLE
+postgres=# insert into test values (1, 'value1');
+INSERT 0 1
+postgres=# select * from test;
+ id | value
+----+--------
+  1 | value1
+(1 row)
+```
+
+### Simulate master death
+
+There are different ways to test this. In a multi-node setup you can just shut down the host running the master keeper service.
+
+In a single-node setup we can kill the current master keeper service, but swarm would restart it before the sentinel declares it failed.
+To avoid the restart, we'll scale the master keeper service down to 0 replicas. Assuming it is keeper1, use the following command:
+
+```
+docker service scale pg_keeper1=0
+```
+
+You can take a look at the leader sentinel log: after some seconds it'll declare the master keeper as not healthy and elect the other one as the new master:
+```
+no keeper info available db=cb96f42d keeper=keeper1
+no keeper info available db=cb96f42d keeper=keeper1
+master db is failed db=cb96f42d keeper=keeper1
+trying to find a standby to replace failed master
+electing db as the new master db=087ce88a keeper=keeper2
+```
+
+Now, inside the previous `psql` session you can redo the last select. The first time, `psql` reports that the connection was closed; it then reconnects successfully:
+
+```
+postgres=# select * from test;
+server closed the connection unexpectedly
+	This probably means the server terminated abnormally
+	before or while processing the request.
+The connection to the server was lost. Attempting reset: Succeeded.
+postgres=# select * from test;
+ id | value
+----+--------
+  1 | value1
+(1 row)
+```
+
+### Scale your cluster keepers
+
+You can add additional stolon keepers by duplicating an existing keeper definition. You need to specify a dedicated volume and a unique `--uid` for each keeper (a compose sketch follows at the end of this README).
+
+### Scale your cluster sentinels and proxies
+
+You can increase/decrease the number of stolon sentinels and proxies:
+
+```
+docker service scale pg_sentinel=3
+```
+
+```
+docker service scale pg_proxy=3
+```
+
+### Update image
+
+For a PostgreSQL major version upgrade, see [PostgreSQL upgrade](postgresql_upgrade.md).
+
+For any PostgreSQL upgrade, check the PostgreSQL release notes for any additional upgrade steps.
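+
+For a simple image update (same PostgreSQL major version), swarm's rolling update mechanism can be used directly; a sketch, assuming a hypothetical already-built tag `sorintlab/master-pg10:next` (for the proxy and sentinel services, the `update_config` sections in `docker-compose-pg.yml` control parallelism and failure handling):
+
+```
+# 'next' is a hypothetical tag; build and push it first
+docker service update --image sorintlab/master-pg10:next pg_proxy
+```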
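+
+As mentioned under "Scale your cluster keepers", adding a keeper means duplicating a service definition. A hypothetical, untested `keeper3` fragment for `docker-compose-pg.yml`, mirroring `keeper1` with its own uid, hostname and volume (a matching `pgkeeper3` entry must also be added to the top-level `volumes:` section):
+
+```
+  # hypothetical extra keeper; mirrors keeper1 (add a pgkeeper3 volume as well)
+  keeper3:
+    image: sorintlab/master-pg10:latest
+    hostname: keeper3
+    environment:
+      - PGDATA=/var/lib/postgresql/data
+    volumes:
+      - pgkeeper3:/var/lib/postgresql/data
+    secrets:
+      - pgsql
+      - pgsql_repl
+    command: gosu stolon stolon-keeper --pg-listen-address keeper3 --pg-repl-username replication --uid keeper3 --pg-su-username postgres --pg-su-passwordfile /run/secrets/pgsql --pg-repl-passwordfile /run/secrets/pgsql_repl --data-dir /var/lib/postgresql/data --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 --log-level debug
+    networks:
+      - etcd_etcd
+      - pgdb
+    deploy:
+      replicas: 1
+```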
+ +For stolon upgrade: TODO diff --git a/examples/docker/docker-compose.yml b/examples/swarm/docker-compose-etcd.yml similarity index 51% rename from examples/docker/docker-compose.yml rename to examples/swarm/docker-compose-etcd.yml index 6643b4a0a..3eec42123 100644 --- a/examples/docker/docker-compose.yml +++ b/examples/swarm/docker-compose-etcd.yml @@ -1,54 +1,9 @@ -version: '2.1' -services: - sentinel: - build: - context: . - dockerfile: Dockerfile-Sentinel - image: ${IMAGE_TAG_SENTINEL} - environment: - - STSENTINEL_STORE_ENDPOINTS=http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 - networks: - - stolon-network - depends_on: - - etcd-00 - - etcd-01 - - etcd-02 - - keeper: - build: - context: . - dockerfile: Dockerfile-Keeper - image: ${IMAGE_TAG_KEEPER} - volumes: - - ./etc/secrets/pgsql:${STOLON_KEEPER_PG_SU_PASSWORDFILE} - - ./etc/secrets/pgsql:${STOLON_KEEPER_PG_REPL_PASSWORDFILE} - environment: - - ETCD_TOKEN=${ETCD_TOKEN} - - STKEEPER_STORE_ENDPOINTS=http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 - - STKEEPER_PG_SU_PASSWORDFILE=${STOLON_KEEPER_PG_SU_PASSWORDFILE} - - STKEEPER_PG_REPL_PASSWORDFILE=${STOLON_KEEPER_PG_REPL_PASSWORDFILE} - networks: - - stolon-network - - proxy: - build: - context: . - dockerfile: Dockerfile-Proxy - image: ${IMAGE_TAG_PROXY} - environment: - - STPROXY_STORE_ENDPOINTS=http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 - networks: - - stolon-network - ports: - - ${STOLON_PROXY_PORT} - depends_on: - - etcd-00 - - etcd-01 - - etcd-02 +version: '3.4' +services: etcd-00: - image: quay.io/coreos/etcd:${ETCD_VERSION} - container_name: etcd-00 + image: quay.io/coreos/etcd:v3.2.17 + hostname: etcd-00 command: - etcd - --name=etcd-00 @@ -60,13 +15,18 @@ services: - --initial-cluster=etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 - --initial-cluster-state=new - --initial-cluster-token=${ETCD_TOKEN} + volumes: + - etcd-00vol:/data.etcd networks: - - stolon-network + - etcd + ports: + - 2379:2379 + deploy: + replicas: 1 etcd-01: - extends: - service: etcd-00 - container_name: etcd-01 + image: quay.io/coreos/etcd:v3.2.17 + hostname: etcd-01 command: - etcd - --name=etcd-01 @@ -78,11 +38,16 @@ services: - --initial-cluster=etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 - --initial-cluster-state=new - --initial-cluster-token=${ETCD_TOKEN} + volumes: + - etcd-01vol:/data.etcd + networks: + - etcd + deploy: + replicas: 1 etcd-02: - extends: - service: etcd-00 - container_name: etcd-02 + image: quay.io/coreos/etcd:v3.2.17 + hostname: etcd-02 command: - etcd - --name=etcd-02 @@ -94,7 +59,24 @@ services: - --initial-cluster=etcd-00=http://etcd-00:2380,etcd-01=http://etcd-01:2380,etcd-02=http://etcd-02:2380 - --initial-cluster-state=new - --initial-cluster-token=${ETCD_TOKEN} + volumes: + - etcd-02vol:/data.etcd + networks: + - etcd + deploy: + replicas: 1 + +volumes: + etcd-00vol: + driver: local + etcd-01vol: + driver: local + etcd-02vol: + driver: local networks: - stolon-network: - driver: bridge + etcd: + driver: overlay + driver_opts: + encrypted: "true" + internal: true diff --git a/examples/swarm/docker-compose-pg.yml b/examples/swarm/docker-compose-pg.yml new file mode 100644 index 000000000..b63cad7a6 --- /dev/null +++ b/examples/swarm/docker-compose-pg.yml @@ -0,0 +1,90 @@ +version: '3.4' + +secrets: + pgsql: + file: ./etc/secrets/pgsql + pgsql_repl: + file: ./etc/secrets/pgsql_repl + +services: + sentinel: + image: sorintlab/master-pg10:latest + command: 
gosu stolon stolon-sentinel --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 --log-level debug
+    networks:
+      - etcd_etcd
+      - pgdb
+    deploy:
+      replicas: 2
+      update_config:
+        parallelism: 1
+        delay: 30s
+        order: stop-first
+        failure_action: pause
+
+  keeper1:
+    image: sorintlab/master-pg10:latest
+    hostname: keeper1
+    environment:
+      - PGDATA=/var/lib/postgresql/data
+    volumes:
+      - pgkeeper1:/var/lib/postgresql/data
+    secrets:
+      - pgsql
+      - pgsql_repl
+    command: gosu stolon stolon-keeper --pg-listen-address keeper1 --pg-repl-username replication --uid keeper1 --pg-su-username postgres --pg-su-passwordfile /run/secrets/pgsql --pg-repl-passwordfile /run/secrets/pgsql_repl --data-dir /var/lib/postgresql/data --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 --log-level debug
+    networks:
+      - etcd_etcd
+      - pgdb
+    deploy:
+      replicas: 1
+#      placement:
+#        constraints: [node.labels.nodename == node1]
+
+  keeper2:
+    image: sorintlab/master-pg10:latest
+    hostname: keeper2
+    environment:
+      - PGDATA=/var/lib/postgresql/data
+    volumes:
+      - pgkeeper2:/var/lib/postgresql/data
+    secrets:
+      - pgsql
+      - pgsql_repl
+    command: gosu stolon stolon-keeper --pg-listen-address keeper2 --pg-repl-username replication --uid keeper2 --pg-su-username postgres --pg-su-passwordfile /run/secrets/pgsql --pg-repl-passwordfile /run/secrets/pgsql_repl --data-dir /var/lib/postgresql/data --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 --log-level debug
+    networks:
+      - etcd_etcd
+      - pgdb
+    deploy:
+      replicas: 1
+#      placement:
+#        constraints: [node.labels.nodename == node2]
+
+  proxy:
+    image: sorintlab/master-pg10:latest
+    command: gosu stolon stolon-proxy --listen-address 0.0.0.0 --cluster-name stolon-cluster --store-backend=etcdv3 --store-endpoints http://etcd-00:2379,http://etcd-01:2379,http://etcd-02:2379 --log-level info
+    networks:
+      - etcd_etcd
+      - pgdb
+    ports:
+      - 5432:5432
+    deploy:
+      replicas: 2
+      update_config:
+        parallelism: 1
+        delay: 30s
+        order: stop-first
+        failure_action: rollback
+
+volumes:
+  pgkeeper1:
+    driver: local
+  pgkeeper2:
+    driver: local
+
+networks:
+  etcd_etcd:
+    external: true
+  pgdb:
+    driver: overlay
+    driver_opts:
+      encrypted: "true"
+    internal: true
diff --git a/examples/swarm/etc/secrets/pgsql b/examples/swarm/etc/secrets/pgsql
new file mode 100644
index 000000000..06a351c01
--- /dev/null
+++ b/examples/swarm/etc/secrets/pgsql
@@ -0,0 +1 @@
+password1
\ No newline at end of file
diff --git a/examples/swarm/etc/secrets/pgsql_repl b/examples/swarm/etc/secrets/pgsql_repl
new file mode 100644
index 000000000..06a351c01
--- /dev/null
+++ b/examples/swarm/etc/secrets/pgsql_repl
@@ -0,0 +1 @@
+password1
\ No newline at end of file