diff --git a/install.sh b/install.sh
index 151af73..91ea79a 100644
--- a/install.sh
+++ b/install.sh
@@ -366,6 +366,72 @@ while true; do
fi
done
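+# Validate the OpenSearch master password locally so a bad value fails fast
+# instead of at deploy time. The checks below mirror the fine-grained access
+# control password policy (8+ characters, upper- and lower-case letters, a
+# digit and a special character) plus a leading-letter rule.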
+validate_opensearch_password() {
+ local master_password=$1
+
+ # Check length (at least 8 characters)
+ if [[ ${#master_password} -lt 8 ]]; then
+ display_error "Error: Password must be at least 8 characters."
+ return 1
+ fi
+
+ # Check that it starts with a letter
+ if [[ ! $master_password =~ ^[A-Za-z] ]]; then
+ display_error "Error: Password must start with a letter."
+ return 1
+ fi
+
+ # Check for at least one uppercase letter and one lowercase letter
+ if [[ ! $master_password =~ [A-Z] || ! $master_password =~ [a-z] ]]; then
+ display_error "Error: Password must include at least one uppercase and one lowercase letter."
+ return 1
+ fi
+
+ # Check for at least one digit
+ if [[ ! $master_password =~ [0-9] ]]; then
+ display_error "Error: Password must include at least one digit."
+ return 1
+ fi
+
+ # Check for at least one special character
+ if [[ ! $master_password =~ [^A-Za-z0-9] ]]; then
+ display_error "Error: Password must include at least one special character."
+ return 1
+ fi
+
+ # Read the password again to confirm it matches
+ echo "Please re-enter the password: "
+ read -r -s master_password_confirm
+ if [[ "$master_password" != "$master_password_confirm" ]]; then
+ display_error "Error: Passwords do not match."
+ return 1
+ fi
+
+ return 0
+
+}
+
+echo "Do you want to push logs to S3 and Open Search? [y/n]: "
+read -r OPEN_SEARCH_SERVICE
+
+if [[ "$OPEN_SEARCH_SERVICE" == "y" ]]; then
+ read -p "Please enter the Master UserName for Open Search Service: " OPEN_SEARCH_MASTER_USER_NAME
+ while true; do
+ echo "Please enter the Master Password for Open Search Service: "
+ read -r -s OPEN_SEARCH_MASTER_PASSWORD
+ if validate_opensearch_password "$OPEN_SEARCH_MASTER_PASSWORD"; then
+ break
+ fi
+ done
+
+elif [[ "$OPEN_SEARCH_SERVICE" == "n" ]]; then
+ break
+else
+ echo "Invalid input. Please enter 'y' or 'n'."
+ read -r OPEN_SEARCH_SERVICE
+fi
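+# The service choice and master credentials collected above are forwarded to
+# CDK later in this script via the -c open_search_* context flags on cdk deploy.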
+
+
if [[ "$INSTALLATION_MODE" == 2 ]]; then
while true; do
@@ -426,7 +492,7 @@ if [[ "$INSTALLATION_MODE" == 2 ]]; then
aws iam delete-role --role-name $ROLE_NAME 2>/dev/null
cdk bootstrap aws://$AWS_ACCOUNT_ID/$AWS_DEFAULT_REGION -c aws_arn=$AWS_ARN
fi
- if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER; then
+ if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER -c open_search_service=$OPEN_SEARCH_SERVICE -c open_search_master_user_name=$OPEN_SEARCH_MASTER_USER_NAME -c open_search_master_password=$OPEN_SEARCH_MASTER_PASSWORD; then
# Wait for the EKS Cluster to be deployed
echo $(aws eks create-addon --cluster-name hs-eks-cluster --addon-name amazon-cloudwatch-observability)
diff --git a/lib/aws/eks.ts b/lib/aws/eks.ts
index 856fe7a..5adb430 100644
--- a/lib/aws/eks.ts
+++ b/lib/aws/eks.ts
@@ -7,7 +7,7 @@ import { Construct } from "constructs";
import { Config } from "./config";
import { ElasticacheStack } from "./elasticache";
import { DataBaseConstruct } from "./rds";
-import { LogsBucket } from "./log_bucket";
+import { LogsStack } from "./log_stack";
import * as kms from "aws-cdk-lib/aws-kms";
import { readFileSync } from "fs";
import { Secret } from "aws-cdk-lib/aws-secretsmanager";
@@ -63,8 +63,12 @@ export class EksStack {
eks.ClusterLoggingTypes.SCHEDULER,
]
});
-
- const logsBucket = new LogsBucket(scope, cluster, "app-logs-s3-service-account");
+
+ // Deploy the logging stack only when the installer opted in with -c open_search_service=y.
+ const push_logs = scope.node.tryGetContext('open_search_service') || 'n';
+ if (push_logs === "y") {
+ new LogsStack(scope, cluster, "app-logs-s3-service-account");
+ }
+
cluster.node.addDependency(ecrTransfer.codebuildTrigger);
cdk.Tags.of(cluster).add("SubStack", "HyperswitchEKS");
diff --git a/lib/aws/log_bucket.ts b/lib/aws/log_bucket.ts
deleted file mode 100644
index d35af20..0000000
--- a/lib/aws/log_bucket.ts
+++ /dev/null
@@ -1,133 +0,0 @@
-import * as ec2 from 'aws-cdk-lib/aws-ec2';
-import * as cdk from "aws-cdk-lib";
-import { Construct } from 'constructs';
-import * as s3 from "aws-cdk-lib/aws-s3";
-import * as iam from "aws-cdk-lib/aws-iam";
-import * as eks from "aws-cdk-lib/aws-eks";
-
-
-export class LogsBucket {
- bucket: s3.Bucket;
- constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) {
- this.bucket = new s3.Bucket(scope, "LogsBucket", {
- removalPolicy: cdk.RemovalPolicy.DESTROY,
- bucketName: `application-logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`,
- });
- cluster.node.addDependency(this.bucket);
- const ns = cluster.addManifest("kube-analytics-ns", {
- "apiVersion": "v1",
- "kind": "Namespace",
- "metadata": {
- "name": "kube-analytics"
- }
- })
- const sa = cluster.addServiceAccount("app-logs-s3-service-account", {
- name: serviceAccountName,
- namespace: "kube-analytics"
- });
- sa.node.addDependency(ns);
- this.bucket.grantReadWrite(sa);
-
- const fluentdChart = cluster.addHelmChart("fluentd", {
- chart: "fluentd",
- repository: "https://fluent.github.io/helm-charts",
- namespace: "kube-analytics",
- wait: false,
- values: {
- kind: "DaemonSet",
- serviceAccount: {
- create: false,
- name: sa.serviceAccountName
- },
- fullnameOverride: "fluentd-s3",
- variant: "s3",
- labels: {
- app: "fluentd-s3"
- },
- resources: {
- limits: {
- cpu: "1",
- memory: "1200Mi"
- },
- requests: {
- cpu: "200m",
- memory: "150Mi"
- }
- },
- rbac: {
- create: false
- },
- livenessProbe: null,
- readinessProbe: null,
- service: {
- enabled: false,
- type: "ClusterIP",
- },
- image: {
- repository: "fluent/fluentd-kubernetes-daemonset",
- pullPolicy: "IfNotPresent",
- tag: "v1.16-debian-s3-1"
- },
- env: [
- {
- name: "S3_BUCKET",
- value: this.bucket.bucketName,
- },
- {
- name: "S3_REGION",
- value: process.env.CDK_DEFAULT_REGION,
- }
-
- ],
- terminationGracePeriodSeconds: 30,
- dnsPolicy: "ClusterFirst",
- restartPolicy: "Always",
- schedulerName: "default-scheduler",
- securityContext: {},
- fileConfigs:{
- "01_sources.conf":` `,
- "02_filters.conf":"",
- "03_dispatch.conf":"",
- "04_outputs.conf": `
-
- @type json
-
- @type copy
-
- @type stdout
-
-
- @type s3
- s3_bucket "#{ENV['S3_BUCKET']}"
- s3_region "#{ENV['S3_REGION']}"
- path "hyperswitch-logs/%Y/%m/%d/$\{tag\}/"
-
- @type file
- path /var/log/fluent/s3
- timekey 3600 # 1 hour partition
- timekey_wait 10m
- timekey_zone +0530
- chunk_limit_size 256m
- flush_at_shutdown
-
-
- `
-
- },
- }
-
- });
- }
-}
diff --git a/lib/aws/log_stack.ts b/lib/aws/log_stack.ts
new file mode 100644
index 0000000..2e2a0d0
--- /dev/null
+++ b/lib/aws/log_stack.ts
@@ -0,0 +1,399 @@
+import * as ec2 from 'aws-cdk-lib/aws-ec2';
+import * as cdk from "aws-cdk-lib";
+import { Construct } from 'constructs';
+import * as s3 from "aws-cdk-lib/aws-s3";
+import * as iam from "aws-cdk-lib/aws-iam";
+import * as eks from "aws-cdk-lib/aws-eks";
+import * as opensearch from 'aws-cdk-lib/aws-opensearchservice';
+import { Domain } from 'aws-cdk-lib/aws-opensearchservice';
+
+
+export class LogsStack {
+ bucket: s3.Bucket;
+ domain: Domain;
+ constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) {
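+ // S3 bucket names are globally unique, so the account ID and region are
+ // appended. RemovalPolicy.DESTROY deletes the bucket with the stack; the
+ // delete fails if objects remain, since autoDeleteObjects is not enabled.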
+ this.bucket = new s3.Bucket(scope, "LogsBucket", {
+ removalPolicy: cdk.RemovalPolicy.DESTROY,
+ bucketName: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`,
+ });
+ cluster.node.addDependency(this.bucket);
+ const loggingNS = cluster.addManifest("logging-ns", {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "name": "logging"
+ }
+ })
+ const sa = cluster.addServiceAccount("app-logs-s3-service-account", {
+ name: serviceAccountName,
+ namespace: "logging"
+ });
+ sa.node.addDependency(loggingNS);
+ this.bucket.grantReadWrite(sa);
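+ // addServiceAccount provisions an IRSA-backed identity, so this grant
+ // attaches the S3 permissions to the pod role rather than to node credentials.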
+
+ const fluentdChart = cluster.addHelmChart("fluentd", {
+ chart: "fluentd",
+ repository: "https://fluent.github.io/helm-charts",
+ namespace: "logging",
+ wait: false,
+ values: {
+ kind: "DaemonSet",
+ serviceAccount: {
+ create: false,
+ name: sa.serviceAccountName
+ },
+ fullnameOverride: "fluentd-s3",
+ variant: "s3",
+ labels: {
+ app: "fluentd-s3"
+ },
+ resources: {
+ limits: {
+ cpu: "1",
+ memory: "1200Mi"
+ },
+ requests: {
+ cpu: "200m",
+ memory: "150Mi"
+ }
+ },
+ rbac: {
+ create: false
+ },
+ livenessProbe: null,
+ readinessProbe: null,
+ service: {
+ enabled: false,
+ type: "ClusterIP",
+ },
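+ // The debian-s3 image variant ships with fluent-plugin-s3 preinstalled.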
+ image: {
+ repository: "fluent/fluentd-kubernetes-daemonset",
+ pullPolicy: "IfNotPresent",
+ tag: "v1.16-debian-s3-1"
+ },
+ env: [
+ {
+ name: "S3_BUCKET",
+ value: this.bucket.bucketName,
+ },
+ {
+ name: "S3_REGION",
+ value: process.env.CDK_DEFAULT_REGION,
+ }
+
+ ],
+ terminationGracePeriodSeconds: 30,
+ dnsPolicy: "ClusterFirst",
+ restartPolicy: "Always",
+ schedulerName: "default-scheduler",
+ securityContext: {},
+ fileConfigs: {
+ "01_sources.conf": ` `,
+ "02_filters.conf": "",
+ "03_dispatch.conf": "",
+ "04_outputs.conf": `
+
+ @type json
+
+ @type copy
+
+ @type stdout
+
+
+ @type s3
+ s3_bucket "#{ENV['S3_BUCKET']}"
+ s3_region "#{ENV['S3_REGION']}"
+ path "hyperswitch-logs/%Y/%m/%d/$\{tag\}/"
+
+ @type file
+ path /var/log/fluent/s3
+ timekey 3600 # 1 hour partition
+ timekey_wait 10m
+ timekey_zone +0530
+ chunk_limit_size 256m
+ flush_at_shutdown
+
+
+ `
+
+ },
+ }
+
+ });
+
+ fluentdChart.node.addDependency(sa);
+
+ // Use the credentials collected by install.sh; the fallbacks only apply when
+ // the context flags are omitted. Note that unsafePlainText embeds the password
+ // in the synthesized template, so prefer a Secrets Manager secret in production.
+ const open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name') || "admin";
+ const open_search_master_password = scope.node.tryGetContext('open_search_master_password') || "Password@123";
+
+ this.domain = new opensearch.Domain(scope, 'OpenSearch', {
+ version: opensearch.EngineVersion.OPENSEARCH_2_11,
+ enableVersionUpgrade: false,
+ ebs: {
+ volumeSize: 50,
+ volumeType: ec2.EbsDeviceVolumeType.GP3,
+ throughput: 125,
+ iops: 3000,
+ },
+ fineGrainedAccessControl: {
+ masterUserName: open_search_master_user_name,
+ masterUserPassword: cdk.SecretValue.unsafePlainText(open_search_master_password),
+ },
+ nodeToNodeEncryption: true,
+ encryptionAtRest: {
+ enabled: true,
+ },
+ removalPolicy: cdk.RemovalPolicy.DESTROY,
+ enforceHttps: true,
+ zoneAwareness:{
+ enabled: true,
+ availabilityZoneCount: 2
+ },
+ capacity: {
+ dataNodes: 2,
+ dataNodeInstanceType: "r6g.large.search",
+ multiAzWithStandbyEnabled: false
+ }
+ });
+ // The resource policy is open to any principal; requests are still
+ // authenticated by fine-grained access control over HTTPS.
+ const policy = new iam.PolicyStatement({
+ effect: iam.Effect.ALLOW,
+ principals: [new iam.AnyPrincipal()],
+ actions: ["es:*"],
+ resources: [`${this.domain.domainArn}/*`],
+ });
+ this.domain.addAccessPolicies(policy);
+
+ const kAnalyticsNS = cluster.addManifest("kube-analytics-ns", {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "name": "kube-analytics"
+ }
+ });
+
+ kAnalyticsNS.node.addDependency(this.domain);
+
+ const openSearchFluentdChart = cluster.addHelmChart("fluentd-opensearch", {
+ chart: "fluentd",
+ repository: "https://fluent.github.io/helm-charts",
+ namespace: "kube-analytics",
+ wait: false,
+ values: {
+ kind: "DaemonSet",
+ serviceAccount: {
+ create: false,
+ name: null
+ },
+ fullnameOverride: "fluentd-opensearch",
+ variant: "opensearch",
+ labels: {
+ app: "fluentd-opensearch"
+ },
+ resources: {
+ limits: {
+ cpu: "1",
+ memory: "1200Mi"
+ },
+ requests: {
+ cpu: "200m",
+ memory: "150Mi"
+ }
+ },
+ rbac: {
+ create: true
+ },
+ livenessProbe: null,
+ readinessProbe: null,
+ service: {
+ enabled: false,
+ type: "ClusterIP",
+ },
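+ // The debian-opensearch image variant bundles fluent-plugin-opensearch.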
+ image: {
+ repository: "fluent/fluentd-kubernetes-daemonset",
+ pullPolicy: "IfNotPresent",
+ tag: "v1.16-debian-opensearch-2"
+ },
+ env: [
+ {
+ name: "FLUENT_OPENSEARCH_HOST",
+ value: this.domain.domainEndpoint,
+ },
+ {
+ name: "FLUENT_OPENSEARCH_PORT",
+ value: "443",
+ },
+ {
+ name: "FLUENT_OPENSEARCH_SSL_VERIFY",
+ value: "true",
+ },
+ {
+ name: "FLUENT_OPENSEARCH_USER_NAME",
+ value: `${open_search_master_user_name}`,
+ },
+ {
+ name: "FLUENT_OPENSEARCH_PASSWORD",
+ value: `${open_search_master_password}`,
+ },
+ {
+ name: "FLUENT_OPENSEARCH_SCHEME",
+ value: "https",
+ }
+
+ ],
+ terminationGracePeriodSeconds: 30,
+ dnsPolicy: "ClusterFirst",
+ restartPolicy: "Always",
+ schedulerName: "default-scheduler",
+ securityContext: {},
+ fileConfigs: {
+ "01_sources.conf": `
+
+
+
+
+ # Hyperswitch Drainer Source
+
+
+ # HyperSwitch Producer Source
+ `,
+
+ "02_filters.conf": `
+ # Parse JSON Logs
+
+ @type parser
+
+ key_name log
+ reserve_time true
+
+ @type multi_format
+
+ format json
+ hash_value_field json_log
+ format_name 'json'
+
+
+ format regexp
+ expression /^(?.*)$/
+ format_name 'raw_message'
+
+
+
+ # Add kubernetes metadata
+
+ @type kubernetes_metadata
+ `,
+
+ "03_dispatch.conf": "",
+
+ "04_outputs.conf": `
+
+
+ @type json
+
+ @type copy
+
+ @type opensearch
+ @id hyperswitch-out_es
+ id_key _hash
+ remove_keys _hash
+ @log_level debug
+ prefer_oj_serializer true
+ reload_on_failure true
+ reload_connections false
+ user "#{ENV['FLUENT_OPENSEARCH_USER_NAME']}"
+ password "#{ENV['FLUENT_OPENSEARCH_PASSWORD']}"
+ request_timeout 120s
+ bulk_message_request_threshold 10MB
+ host "#{ENV['FLUENT_OPENSEARCH_HOST']}"
+ port "#{ENV['FLUENT_OPENSEARCH_PORT']}"
+ scheme "#{ENV['FLUENT_OPENSEARCH_SCHEME'] || 'http'}"
+ ssl_verify "#{ENV['FLUENT_OPENSEARCH_SSL_VERIFY'] || 'true'}"
+ logstash_prefix logstash-$\{tag\}
+ include_timestamp true
+ logstash_format true
+ type_name fluentd
+
+ @type file
+ path /var/log/opensearch-buffers/hyperswitch-buffer
+ flush_thread_count 6
+ flush_interval 1s
+ chunk_limit_size 5M
+ queue_limit_length 4
+ flush_mode interval
+ retry_max_interval 30
+ retry_type exponential_backoff
+ overflow_action drop_oldest_chunk
+
+
+ `
+ },
+ ingress:{
+ enabled: false,
+ }
+ }
+
+ });
+ openSearchFluentdChart.node.addDependency(kAnalyticsNS);
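+ // Dependency chain: chart -> kube-analytics namespace -> OpenSearch domain,
+ // so the FLUENT_OPENSEARCH_HOST endpoint exists before fluentd ships logs.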
+
+ }
+}
diff --git a/single-click/single-click.sh b/single-click/single-click.sh
index 7bb1c82..7625a71 100644
--- a/single-click/single-click.sh
+++ b/single-click/single-click.sh
@@ -118,7 +118,7 @@ else
# cdk bootstrap aws://"$AWS_ACCOUNT_ID"/"$AWS_DEFAULT_REGION" -c aws_arn="$AWS_ARN" -c stack=imagebuilder
# cdk deploy --require-approval never -c stack=imagebuilder $AMI_OPTIONS
cdk bootstrap aws://$AWS_ACCOUNT/$AWS_REGION -c aws_arn=$AWS_ARN
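+# Note: this script does not prompt for the OpenSearch settings; export
+# OPEN_SEARCH_SERVICE (y/n), OPEN_SEARCH_MASTER_USER_NAME and
+# OPEN_SEARCH_MASTER_PASSWORD beforehand, or the context values are empty.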
- if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER; then
+ if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER -c open_search_service=$OPEN_SEARCH_SERVICE -c open_search_master_user_name=$OPEN_SEARCH_MASTER_USER_NAME -c open_search_master_password=$OPEN_SEARCH_MASTER_PASSWORD; then
echo $(aws eks create-addon --cluster-name hs-eks-cluster --addon-name amazon-cloudwatch-observability)
aws eks update-kubeconfig --region "$AWS_REGION" --name hs-eks-cluster
# Deploy Load balancer and Ingress