From f860d69f6c9a26e5867a244000cc33b132d8115d Mon Sep 17 00:00:00 2001 From: Andi Date: Fri, 16 Aug 2024 14:54:01 +0200 Subject: [PATCH] Add guide for deploying Memgraph HA to AWS (#58) Add a guide for deploying Memgraph HA to AWS. --- .../memgraph-high-availability/aws/README.md | 82 +++++++++++++++++++ .../aws/cluster.yaml | 75 +++++++++++++++++ 2 files changed, 157 insertions(+) create mode 100644 charts/memgraph-high-availability/aws/README.md create mode 100644 charts/memgraph-high-availability/aws/cluster.yaml diff --git a/charts/memgraph-high-availability/aws/README.md b/charts/memgraph-high-availability/aws/README.md new file mode 100644 index 0000000..2879872 --- /dev/null +++ b/charts/memgraph-high-availability/aws/README.md @@ -0,0 +1,82 @@ +## Description + +This guide instructs users on how to deploy Memgraph HA to AWS EKS. It serves only as a starting point and there are many possible ways to extend what is currently here. In this setup +each Memgraph database is deployed to a separate `t3.small` node in the `eu-west-1` AWS region. + +## Installation + +You will need: +- [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [eksctl](https://docs.aws.amazon.com/eks/latest/userguide/setting-up.html) +- [helm](https://helm.sh/docs/intro/install/) + +We used `kubectl 1.30.0, aws 2.17.29, eksctl 0.188.0 and helm 3.14.4`. + +## Configure AWS CLI + +Use `aws configure` and enter your `AWS Access Key ID, Secret Access Key, Region and output format`. + +## Create EKS Cluster + +We provide you with the sample configuration file for AWS in this folder. Running + +``` +eksctl create cluster -f cluster.yaml +``` + +should be sufficient. Make sure to change the path to the public SSH key if you want to have SSH access to EC2 instances. After creating the cluster, `kubectl` should pick up +the AWS context and you can verify this by running `kubectl config current-context`.
Mine is pointing to `andi.skrgat@test-cluster-ha.eu-west-1.eksctl.io`. + +## Add Helm Charts repository + +If you don't have the Memgraph Helm repo installed yet, add it by running: + +``` +helm repo add memgraph https://memgraph.github.io/helm-charts +helm repo list +helm repo update +``` + +## Install the AWS CSI driver + +Once EKS nodes are started, you need to install the AWS Elastic Block Store CSI driver so the cluster can auto-manage EBS resources from AWS. Run the following: + +``` +kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/ecr/?ref=release-1.25" +``` + +## Authentication and authorization + +Before deploying the cluster, you need to provide access to the NodeInstanceRole. First find the name of the role with + +``` +aws eks describe-nodegroup --cluster-name test-cluster-ha --nodegroup-name standard-workers +``` + +and then make sure it has the required access; you can inspect the policies currently attached to it with: + +``` +aws iam list-attached-role-policies --role-name eksctl-test-cluster-ha-nodegroup-s-NodeInstanceRole- +``` + +It is also important to create an Inbound Rule in the Security Group attached to the eksctl cluster which allows TCP traffic +on ports 30000-32767. We find it easiest to modify this by going to the EC2 Dashboard. + + +## Deploy Memgraph cluster + +The only step left is to deploy the cluster using + +``` +helm install mem-ha-test ./charts/memgraph-high-availability --set \ +memgraph.env.MEMGRAPH_ENTERPRISE_LICENSE=<your-license>, \ +memgraph.env.MEMGRAPH_ORGANIZATION_NAME=<your-organization-name>, \ +memgraph.data.volumeClaim.storagePVCClassName=gp2, \ +memgraph.coordinators.volumeClaim.storagePVCClassName=gp2, \ +memgraph.data.volumeClaim.logPVCClassName=gp2, \ +memgraph.coordinators.volumeClaim.logPVCClassName=gp2, \ +memgraph.affinity.enabled=true +``` + +You can check the state of the cluster with `kubectl get pods -o wide`.
diff --git a/charts/memgraph-high-availability/aws/cluster.yaml b/charts/memgraph-high-availability/aws/cluster.yaml new file mode 100644 index 0000000..acec423 --- /dev/null +++ b/charts/memgraph-high-availability/aws/cluster.yaml @@ -0,0 +1,75 @@ +accessConfig: + authenticationMode: API_AND_CONFIG_MAP +addonsConfig: {} +apiVersion: eksctl.io/v1alpha5 +availabilityZones: +- eu-west-1a +- eu-west-1c +- eu-west-1b +cloudWatch: + clusterLogging: {} +iam: + vpcResourceControllerPolicy: true + withOIDC: false +kind: ClusterConfig +kubernetesNetworkConfig: + ipFamily: IPv4 +managedNodeGroups: +- amiFamily: AmazonLinux2 + desiredCapacity: 5 + disableIMDSv1: true + disablePodIMDS: false + iam: + withAddonPolicies: + albIngress: false + appMesh: false + appMeshPreview: false + autoScaler: false + awsLoadBalancerController: false + certManager: false + cloudWatch: false + ebs: false + efs: false + externalDNS: false + fsx: false + imageBuilder: false + xRay: false + instanceSelector: {} + instanceType: t3.small + labels: + alpha.eksctl.io/cluster-name: test-cluster-ha + alpha.eksctl.io/nodegroup-name: standard-workers + maxSize: 5 + minSize: 5 + name: standard-workers + privateNetworking: false + releaseVersion: "" + securityGroups: + withLocal: null + withShared: null + ssh: + allow: true + publicKeyPath: ~/.ssh/id_rsa.pub + tags: + alpha.eksctl.io/nodegroup-name: standard-workers + alpha.eksctl.io/nodegroup-type: managed + volumeIOPS: 3000 + volumeSize: 80 + volumeThroughput: 125 + volumeType: gp3 +metadata: + name: test-cluster-ha + region: eu-west-1 + version: "1.30" +privateCluster: + enabled: false + skipEndpointCreation: false +vpc: + autoAllocateIPv6: false + cidr: 192.168.0.0/16 + clusterEndpoints: + privateAccess: false + publicAccess: true + manageSharedNodeSecurityGroupRules: true + nat: + gateway: Single