From 5b29b87ad7f7e5d79ebca5278abe5f94a0f3f3e7 Mon Sep 17 00:00:00 2001 From: Prasunna Soppa Date: Mon, 13 May 2024 18:48:57 +0530 Subject: [PATCH 1/7] add support to push logs to s3 bucket --- lib/aws/eks.ts | 2 ++ lib/aws/log_bucket.ts | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 lib/aws/log_bucket.ts diff --git a/lib/aws/eks.ts b/lib/aws/eks.ts index e6af4ba..4da0c1f 100644 --- a/lib/aws/eks.ts +++ b/lib/aws/eks.ts @@ -7,6 +7,7 @@ import { Construct } from "constructs"; import { Config } from "./config"; import { ElasticacheStack } from "./elasticache"; import { DataBaseConstruct } from "./rds"; +import { LogsBucket } from "./log_bucket"; import * as kms from "aws-cdk-lib/aws-kms"; import { readFileSync } from "fs"; import { Secret } from "aws-cdk-lib/aws-secretsmanager"; @@ -56,6 +57,7 @@ export class EksStack { clusterName: "hs-eks-cluster", }); + const logsBucket = new LogsBucket(scope, cluster, "app-logs-s3-service-account"); cluster.node.addDependency(ecrTransfer.codebuildTrigger); cdk.Tags.of(cluster).add("SubStack", "HyperswitchEKS"); diff --git a/lib/aws/log_bucket.ts b/lib/aws/log_bucket.ts new file mode 100644 index 0000000..b6b5e05 --- /dev/null +++ b/lib/aws/log_bucket.ts @@ -0,0 +1,32 @@ +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as cdk from "aws-cdk-lib"; +import { Construct } from 'constructs'; +import * as s3 from "aws-cdk-lib/aws-s3"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as eks from "aws-cdk-lib/aws-eks"; + + +export class LogsBucket { + bucket: s3.Bucket; + constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) { + this.bucket = new s3.Bucket(scope, "LogsBucket", { + removalPolicy: cdk.RemovalPolicy.DESTROY, + bucketName: "logs-bucket-eks-hs-2023-05-12", + }); + cluster.node.addDependency(this.bucket); + const ns = cluster.addManifest("logging-ns", { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "logging" + } + }) + 
const sa = cluster.addServiceAccount("app-logs-s3-service-account", { + name: serviceAccountName, + namespace: "logging" + }); + sa.node.addDependency(ns); + this.bucket.grantReadWrite(sa); + new cdk.CfnOutput(scope, 'LogsS3Bucket', { value: this.bucket.bucketName }); + } +} From 6dd7916e5fd3af3162f3edfbb23aff172cd1570b Mon Sep 17 00:00:00 2001 From: Prasunna Soppa Date: Tue, 14 May 2024 12:36:18 +0530 Subject: [PATCH 2/7] update bucket name --- lib/aws/eks.ts | 1 + lib/aws/log_bucket.ts | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/aws/eks.ts b/lib/aws/eks.ts index 4da0c1f..fc3fdd2 100644 --- a/lib/aws/eks.ts +++ b/lib/aws/eks.ts @@ -521,6 +521,7 @@ export class EksStack { application: { server: { secrets_manager: "aws_kms", + bucket_name: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`, serviceAccountAnnotations: { "eks.amazonaws.com/role-arn": hyperswitchServiceAccountRole.roleArn, }, diff --git a/lib/aws/log_bucket.ts b/lib/aws/log_bucket.ts index b6b5e05..252fb19 100644 --- a/lib/aws/log_bucket.ts +++ b/lib/aws/log_bucket.ts @@ -11,7 +11,7 @@ export class LogsBucket { constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) { this.bucket = new s3.Bucket(scope, "LogsBucket", { removalPolicy: cdk.RemovalPolicy.DESTROY, - bucketName: "logs-bucket-eks-hs-2023-05-12", + bucketName: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`, }); cluster.node.addDependency(this.bucket); const ns = cluster.addManifest("logging-ns", { From fa5ee71d9c8ce5c01b2455ef533716a640ef7781 Mon Sep 17 00:00:00 2001 From: Prasunna Soppa Date: Wed, 15 May 2024 12:42:07 +0530 Subject: [PATCH 3/7] update session manager policy name --- lib/aws/stack.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/aws/stack.ts b/lib/aws/stack.ts index 5a54ea2..12b65c2 100644 --- a/lib/aws/stack.ts +++ b/lib/aws/stack.ts @@ -226,7 +226,7 @@ export class AWSStack extends 
cdk.Stack { ] }); const ext_jump_policy = new iam.ManagedPolicy(this, 'SessionManagerPolicies', { - managedPolicyName: "SessionManagerPolicies", + managedPolicyName: `SessionManagerPolicies-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`, description: "SessionManagerPolicies", document: external_jump_policy }); From f5d1a635cf0b805dea3aadf6f925f9285b1ee222 Mon Sep 17 00:00:00 2001 From: Prasunna Soppa Date: Sun, 19 May 2024 16:19:17 +0530 Subject: [PATCH 4/7] add support to push logs to s3 --- lib/aws/log_bucket.ts | 105 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/lib/aws/log_bucket.ts b/lib/aws/log_bucket.ts index 252fb19..e5f32d1 100644 --- a/lib/aws/log_bucket.ts +++ b/lib/aws/log_bucket.ts @@ -14,7 +14,7 @@ export class LogsBucket { bucketName: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`, }); cluster.node.addDependency(this.bucket); - const ns = cluster.addManifest("logging-ns", { + const ns = cluster.addManifest("logging-ns", { "apiVersion": "v1", "kind": "Namespace", "metadata": { @@ -27,6 +27,109 @@ export class LogsBucket { }); sa.node.addDependency(ns); this.bucket.grantReadWrite(sa); + + const fluentdChart = cluster.addHelmChart("fluentd", { + chart: "fluentd", + repository: "https://fluent.github.io/helm-charts", + namespace: "logging", + wait: false, + values: { + kind: "DaemonSet", + serviceAccount: { + create: false, + name: sa.serviceAccountName + }, + fullnameOverride: "fluentd-s3", + variant: "s3", + labels: { + app: "fluentd-s3" + }, + resources: { + limits: { + cpu: "1", + memory: "1200Mi" + }, + requests: { + cpu: "200m", + memory: "150Mi" + } + }, + rbac: { + create: false + }, + livenessProbe: null, + readinessProbe: null, + service: { + enabled: false, + type: "ClusterIP", + }, + image: { + repository: "fluent/fluentd-kubernetes-daemonset", + pullPolicy: "IfNotPresent", + tag: "v1.16-debian-s3-1" + }, + env: [ + { + name: 
# Validate the OpenSearch master password against the AWS fine-grained
# access control requirements, then ask the user to re-enter it to confirm.
# Prints an error via display_error and returns 1 on any failure; returns 0
# when the password is acceptable and confirmed.
validate_master_password() {
    local master_password=$1

    # At least 8 characters.
    if [[ ${#master_password} -lt 8 ]]; then
        display_error "Error: Password must be at least 8 characters."
        return 1
    fi

    # Must start with a letter.
    if [[ ! $master_password =~ ^[A-Za-z] ]]; then
        display_error "Error: Password must start with a letter."
        return 1
    fi

    # At least one uppercase and one lowercase letter.
    if [[ ! $master_password =~ [A-Z] || ! $master_password =~ [a-z] ]]; then
        display_error "Error: Password must include at least one uppercase and one lowercase letter."
        return 1
    fi

    # At least one digit.
    if [[ ! $master_password =~ [0-9] ]]; then
        display_error "Error: Password must include at least one digit."
        return 1
    fi

    # At least one special (non-alphanumeric) character.
    # The earlier check `[[ $password == [^A-Za-z0-9] ]]` tested the wrong
    # variable and, with `==`, only matched a password consisting of a
    # single special character; a regex match on the right variable is the
    # correct test.
    if [[ ! $master_password =~ [^A-Za-z0-9] ]]; then
        display_error "Error: Password should include special characters."
        return 1
    fi

    # Re-read the password (not echoed) to guard against typos.
    echo "Please re-enter the password: "
    read -r -s master_password_confirm
    if [[ "$master_password" != "$master_password_confirm" ]]; then
        display_error "Error: Passwords do not match."
        return 1
    fi

    return 0
}

# Ask whether logs should be shipped to S3 / OpenSearch. Loop until the
# answer is exactly 'y' or 'n': the original fell through on invalid input
# with a `break` outside any loop (a runtime error in bash) and re-read the
# answer without ever re-checking it.
while true; do
    echo "Do you want to push logs to S3 and Open Search? [y/n]: "
    read -r OPEN_SEARCH_SERVICE
    if [[ "$OPEN_SEARCH_SERVICE" == "y" ]]; then
        read -p "Please enter the Master UserName for Open Search Service: " MASTER_USER_NAME
        while true; do
            echo "Please enter the Master Password for Open Search Service: "
            read -r -s MASTER_PASSWORD
            if validate_master_password "$MASTER_PASSWORD"; then
                break
            fi
        done
        break
    elif [[ "$OPEN_SEARCH_SERVICE" == "n" ]]; then
        break
    else
        echo "Invalid input. Please enter 'y' or 'n'."
    fi
done
= cluster.addServiceAccount("app-logs-s3-service-account", { - name: serviceAccountName, - namespace: "logging" - }); - sa.node.addDependency(ns); - this.bucket.grantReadWrite(sa); - - const fluentdChart = cluster.addHelmChart("fluentd", { - chart: "fluentd", - repository: "https://fluent.github.io/helm-charts", - namespace: "logging", - wait: false, - values: { - kind: "DaemonSet", - serviceAccount: { - create: false, - name: sa.serviceAccountName - }, - fullnameOverride: "fluentd-s3", - variant: "s3", - labels: { - app: "fluentd-s3" - }, - resources: { - limits: { - cpu: "1", - memory: "1200Mi" - }, - requests: { - cpu: "200m", - memory: "150Mi" - } - }, - rbac: { - create: false - }, - livenessProbe: null, - readinessProbe: null, - service: { - enabled: false, - type: "ClusterIP", - }, - image: { - repository: "fluent/fluentd-kubernetes-daemonset", - pullPolicy: "IfNotPresent", - tag: "v1.16-debian-s3-1" - }, - env: [ - { - name: "S3_BUCKET", - value: this.bucket.bucketName, - }, - { - name: "S3_REGION", - value: process.env.CDK_DEFAULT_REGION, - } - - ], - terminationGracePeriodSeconds: 30, - dnsPolicy: "ClusterFirst", - restartPolicy: "Always", - schedulerName: "default-scheduler", - securityContext: {}, - fileConfigs:{ - "01_sources.conf":` - @type tail - @id in_tail_hyperswitch-server-router_logs - - path /var/log/containers/hyperswitch-*.log - pos_file /var/log/fluentd-hyperswitch-server-router-containers.log.pos - tag "hyperswitch.*" - read_from_head true - - @type regexp - expression /^(? 
- `, - "02_filters.conf":"", - "03_dispatch.conf":"", - "04_outputs.conf": ` - - @type json - - @type copy - - @type stdout - - - @type s3 - s3_bucket "#{ENV['S3_BUCKET']}" - s3_region "#{ENV['S3_REGION']}" - path "hyperswitch-logs/%Y/%m/%d/$\{tag\}/" - - @type file - path /var/log/fluent/s3 - timekey 3600 # 1 hour partition - timekey_wait 10m - timekey_zone +0530 - chunk_limit_size 256m - flush_at_shutdown - - - ` - - }, - } - - }); - - new cdk.CfnOutput(scope, 'LogsS3Bucket', { value: this.bucket.bucketName }); - } -} diff --git a/lib/aws/log_stack.ts b/lib/aws/log_stack.ts new file mode 100644 index 0000000..0311a21 --- /dev/null +++ b/lib/aws/log_stack.ts @@ -0,0 +1,400 @@ +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as cdk from "aws-cdk-lib"; +import { Construct } from 'constructs'; +import * as s3 from "aws-cdk-lib/aws-s3"; +import * as iam from "aws-cdk-lib/aws-iam"; +import * as eks from "aws-cdk-lib/aws-eks"; +import * as opensearch from 'aws-cdk-lib/aws-opensearchservice'; +import { Domain, EngineVersion, IpAddressType } from 'aws-cdk-lib/aws-opensearchservice'; + + +export class LogsStack { + bucket: s3.Bucket; + domain: Domain; + constructor(scope: Construct, cluster: eks.Cluster, serviceAccountName?: string) { + this.bucket = new s3.Bucket(scope, "LogsBucket", { + removalPolicy: cdk.RemovalPolicy.DESTROY, + bucketName: `logs-bucket-${process.env.CDK_DEFAULT_ACCOUNT}-${process.env.CDK_DEFAULT_REGION}`, + }); + cluster.node.addDependency(this.bucket); + const loggingNS = cluster.addManifest("logging-ns", { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "logging" + } + }) + const sa = cluster.addServiceAccount("app-logs-s3-service-account", { + name: serviceAccountName, + namespace: "logging" + }); + sa.node.addDependency(loggingNS); + this.bucket.grantReadWrite(sa); + + const fluentdChart = cluster.addHelmChart("fluentd", { + chart: "fluentd", + repository: "https://fluent.github.io/helm-charts", + namespace: "logging", 
+ wait: false, + values: { + kind: "DaemonSet", + serviceAccount: { + create: false, + name: sa.serviceAccountName + }, + fullnameOverride: "fluentd-s3", + variant: "s3", + labels: { + app: "fluentd-s3" + }, + resources: { + limits: { + cpu: "1", + memory: "1200Mi" + }, + requests: { + cpu: "200m", + memory: "150Mi" + } + }, + rbac: { + create: false + }, + livenessProbe: null, + readinessProbe: null, + service: { + enabled: false, + type: "ClusterIP", + }, + image: { + repository: "fluent/fluentd-kubernetes-daemonset", + pullPolicy: "IfNotPresent", + tag: "v1.16-debian-s3-1" + }, + env: [ + { + name: "S3_BUCKET", + value: this.bucket.bucketName, + }, + { + name: "S3_REGION", + value: process.env.CDK_DEFAULT_REGION, + } + + ], + terminationGracePeriodSeconds: 30, + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + fileConfigs: { + "01_sources.conf": ` + @type tail + @id in_tail_hyperswitch-server-router_logs + + path /var/log/containers/hyperswitch-*.log + pos_file /var/log/fluentd-hyperswitch-server-router-containers.log.pos + tag "hyperswitch.*" + read_from_head true + + @type regexp + expression /^(? 
+ `, + "02_filters.conf": "", + "03_dispatch.conf": "", + "04_outputs.conf": ` + + @type json + + @type copy + + @type stdout + + + @type s3 + s3_bucket "#{ENV['S3_BUCKET']}" + s3_region "#{ENV['S3_REGION']}" + path "hyperswitch-logs/%Y/%m/%d/$\{tag\}/" + + @type file + path /var/log/fluent/s3 + timekey 3600 # 1 hour partition + timekey_wait 10m + timekey_zone +0530 + chunk_limit_size 256m + flush_at_shutdown + + + ` + + }, + } + + }); + + fluentdChart.node.addDependency(sa); + + this.domain = new opensearch.Domain(scope, 'OpenSearch', { + version: opensearch.EngineVersion.OPENSEARCH_2_11, + enableVersionUpgrade: false, + ebs: { + volumeSize: 50, + volumeType: ec2.EbsDeviceVolumeType.GP3, + throughput: 125, + iops: 3000, + }, + fineGrainedAccessControl: { + masterUserName: "admin", + masterUserPassword: cdk.SecretValue.unsafePlainText("Pluentd@123"), + }, + nodeToNodeEncryption: true, + encryptionAtRest: { + enabled: true, + }, + removalPolicy: cdk.RemovalPolicy.DESTROY, + enforceHttps: true, + zoneAwareness:{ + enabled: true, + availabilityZoneCount: 2 + }, + capacity: { + dataNodes: 2, + dataNodeInstanceType: "r6g.large.search", + multiAzWithStandbyEnabled: false + } + }); + // this.domain.grantReadWrite(new iam.AnyPrincipal()); + const policy = new iam.PolicyStatement({ + effect: iam.Effect.ALLOW, + principals: [new iam.AnyPrincipal()], + actions: ["es:*"], + resources: [`${this.domain.domainArn}/*`], + }); + this.domain.addAccessPolicies(policy); + + const kAnalyticsNS = cluster.addManifest("kube-analytics-ns", { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "kube-analytics" + } + }); + + kAnalyticsNS.node.addDependency(this.domain); + + const openSearchFluentdChart = cluster.addHelmChart("fluentd-opensearch", { + chart: "fluentd", + repository: "https://fluent.github.io/helm-charts", + namespace: "kube-analytics", + wait: false, + values: { + kind: "DaemonSet", + serviceAccount: { + create: false, + name: null + }, + fullnameOverride: 
"fluentd-opensearch", + variant: "opensearch", + labels: { + app: "fluentd-opensearch" + }, + resources: { + limits: { + cpu: "1", + memory: "1200Mi" + }, + requests: { + cpu: "200m", + memory: "150Mi" + } + }, + rbac: { + create: true + }, + livenessProbe: null, + readinessProbe: null, + service: { + enabled: false, + type: "ClusterIP", + }, + image: { + repository: "fluent/fluentd-kubernetes-daemonset", + pullPolicy: "IfNotPresent", + tag: "v1.16-debian-opensearch-2" + }, + env: [ + { + name: "FLUENT_OPENSEARCH_HOST", + value: this.domain.domainEndpoint, + }, + { + name: "FLUENT_OPENSEARCH_PORT", + value: "443", + }, + { + name: "FLUENT_OPENSEARCH_SSL_VERIFY", + value: "true", + }, + { + name: "FLUENT_OPENSEARCH_USER_NAME", + value: `${process.env.MASTER_USER_NAME}`, + }, + { + name: "FLUENT_OPENSEARCH_PASSWORD", + value: `${process.env.MASTER_PASSWORD}`, + }, + { + name: "FLUENT_OPENSEARCH_SCHEME", + value: "https", + } + + ], + terminationGracePeriodSeconds: 30, + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + fileConfigs: { + "01_sources.conf": ` + + @type tail + @id in_tail_hyperswitch-server-router_logs + + path /var/log/containers/hyperswitch-server*.log + pos_file /var/log/fluentd-hyperswitch-server-router-containers.log.pos + tag "hyperswitch.router" + read_from_head true + + @type regexp + expression /^(? + + + + @type tail + @id in_tail_hyperswitch-consumer_logs + + path /var/log/containers/hyperswitch-consumer*hyperswitch-*.log + pos_file /var/log/fluentd-hyperswitch-consumer-containers.log.pos + tag "hyperswitch.consumer" + read_from_head true + + @type regexp + expression /^(? + + + # Hyperswitch Drainer Source + + @type tail + @id in_tail_hyperswitch-drainer_logs + + path /var/log/containers/hyperswitch-drainer*hyperswitch-*.log + pos_file /var/log/fluentd-hyperswitch-drainer-containers.log.pos + tag "hyperswitch.drainer" + read_from_head true + + @type regexp + expression /^(? 
+ + + # HyperSwitch Producer Source + + @type tail + @id in_tail_hyperswitch-producer_logs + + path /var/log/containers/hyperswitch-producer*hyperswitch-*.log + pos_file /var/log/fluentd-hyperswitch-producer-containers.log.pos + tag "hyperswitch.producer" + read_from_head true + + @type regexp + expression /^(? + `, + + "02_filters.conf": ` + # Parse JSON Logs + + @type parser + + key_name log + reserve_time true + + @type multi_format + + format json + hash_value_field json_log + format_name 'json' + + + format regexp + expression /^(?.*)$/ + format_name 'raw_message' + + + + # Add kubernetes metadata + + @type kubernetes_metadata + `, + + "03_dispatch.conf": "", + + "04_outputs.conf": ` + + + @type json + + @type copy + + @type opensearch + @id hyperswitch-out_es + id_key _hash + remove_keys _hash + @log_level debug + prefer_oj_serializer true + reload_on_failure true + reload_connections false + user "#{ENV['FLUENT_OPENSEARCH_USER_NAME']}" + password "#{ENV['FLUENT_OPENSEARCH_PASSWORD']}" + request_timeout 120s + bulk_message_request_threshold 10MB + host "#{ENV['FLUENT_OPENSEARCH_HOST']}" + port "#{ENV['FLUENT_OPENSEARCH_PORT']}" + scheme "#{ENV['FLUENT_OPENSEARCH_SCHEME'] || 'http'}" + ssl_verify "#{ENV['FLUENT_OPENSEARCH_SSL_VERIFY'] || 'true'}" + logstash_prefix logstash-$\{tag\} + include_timestamp true + logstash_format true + type_name fluentd + + @type file + path /var/log/opensearch-buffers/hyperswitch-buffer + flush_thread_count 6 + flush_interval 1s + chunk_limit_size 5M + queue_limit_length 4 + flush_mode interval + retry_max_interval 30 + retry_type exponential_backoff + overflow_action drop_oldest_chunk + + + ` + }, + ingress:{ + enabled: false, + } + } + + }); + openSearchFluentdChart.node.addDependency(kAnalyticsNS); + + + new cdk.CfnOutput(scope, 'LogsS3Bucket', { value: this.bucket.bucketName }); + new cdk.CfnOutput(scope, 'OpenSearch Endpoint', { value: this.domain.domainEndpoint }); + } +} From 47d0b34103b1302212416936826b5c5e01a499e8 Mon Sep 
17 00:00:00 2001 From: Prasunna Soppa Date: Tue, 21 May 2024 18:55:42 +0530 Subject: [PATCH 6/7] code refactoring --- install.sh | 8 ++++---- lib/aws/eks.ts | 3 ++- lib/aws/log_stack.ts | 6 ++++-- single-click/single-click.sh | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/install.sh b/install.sh index 449264b..130873e 100644 --- a/install.sh +++ b/install.sh @@ -406,11 +406,11 @@ echo "Do you want to push logs to S3 and Open Search? [y/n]: " read -r OPEN_SEARCH_SERVICE if [[ "$OPEN_SEARCH_SERVICE" == "y" ]]; then - read -p "Please enter the Master UserName for Open Search Service: " MASTER_USER_NAME + read -p "Please enter the Master UserName for Open Search Service: " OPEN_SEARCH_MASTER_USER_NAME while true; do echo "Please enter the Master Password for Open Search Service: " - read -r -s MASTER_PASSWORD - if validate_master_password "$MASTER_PASSWORD"; then + read -r -s OPEN_SEARCH_MASTER_PASSWORD + if validate_master_password "$OPEN_SEARCH_MASTER_PASSWORD"; then break fi done @@ -483,7 +483,7 @@ if [[ "$INSTALLATION_MODE" == 2 ]]; then aws iam delete-role --role-name $ROLE_NAME 2>/dev/null cdk bootstrap aws://$AWS_ACCOUNT_ID/$AWS_DEFAULT_REGION -c aws_arn=$AWS_ARN fi - if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER; then + if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER -c open_search_service=$OPEN_SEARCH_SERVICE -c open_search_master_user_name=$OPEN_SEARCH_MASTER_USER_NAME -c open_search_master_password=$OPEN_SEARCH_MASTER_PASSWORD; then # Wait for the EKS Cluster to be deployed echo $(aws eks create-addon --cluster-name hs-eks-cluster --addon-name 
amazon-cloudwatch-observability) aws eks update-kubeconfig --region "$AWS_DEFAULT_REGION" --name hs-eks-cluster diff --git a/lib/aws/eks.ts b/lib/aws/eks.ts index e281b53..c13a1f2 100644 --- a/lib/aws/eks.ts +++ b/lib/aws/eks.ts @@ -56,7 +56,8 @@ export class EksStack { vpc: vpc, clusterName: "hs-eks-cluster", }); - if (`${process.env.OPEN_SEARCH_SERVICE}` == "y"){ + let open_search_service = scope.node.tryGetContext('open_search_service'); + if (`${open_search_service}` == "y"){ const logsStack = new LogsStack(scope, cluster, "app-logs-s3-service-account"); } diff --git a/lib/aws/log_stack.ts b/lib/aws/log_stack.ts index 0311a21..fbc9667 100644 --- a/lib/aws/log_stack.ts +++ b/lib/aws/log_stack.ts @@ -182,6 +182,8 @@ export class LogsStack { }); kAnalyticsNS.node.addDependency(this.domain); + let open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name'); + let open_search_master_password = scope.node.tryGetContext('open_search_master_password'); const openSearchFluentdChart = cluster.addHelmChart("fluentd-opensearch", { chart: "fluentd", @@ -238,11 +240,11 @@ export class LogsStack { }, { name: "FLUENT_OPENSEARCH_USER_NAME", - value: `${process.env.MASTER_USER_NAME}`, + value: `${open_search_master_user_name}`, }, { name: "FLUENT_OPENSEARCH_PASSWORD", - value: `${process.env.MASTER_PASSWORD}`, + value: `${open_search_master_password}`, }, { name: "FLUENT_OPENSEARCH_SCHEME", diff --git a/single-click/single-click.sh b/single-click/single-click.sh index 7bb1c82..7625a71 100644 --- a/single-click/single-click.sh +++ b/single-click/single-click.sh @@ -118,7 +118,7 @@ else # cdk bootstrap aws://"$AWS_ACCOUNT_ID"/"$AWS_DEFAULT_REGION" -c aws_arn="$AWS_ARN" -c stack=imagebuilder # cdk deploy --require-approval never -c stack=imagebuilder $AMI_OPTIONS cdk bootstrap aws://$AWS_ACCOUNT/$AWS_REGION -c aws_arn=$AWS_ARN - if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c 
master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER; then + if cdk deploy --require-approval never -c db_pass=$DB_PASS -c admin_api_key=$ADMIN_API_KEY -c aws_arn=$AWS_ARN -c master_enc_key=$MASTER_ENC_KEY -c vpn_ips=$VPN_IPS -c base_ami=$base_ami -c envoy_ami=$envoy_ami -c squid_ami=$squid_ami $LOCKER -c open_search_service=$OPEN_SEARCH_SERVICE -c open_search_master_user_name=$OPEN_SEARCH_MASTER_USER_NAME -c open_search_master_password=$OPEN_SEARCH_MASTER_PASSWORD; then echo $(aws eks create-addon --cluster-name hs-eks-cluster --addon-name amazon-cloudwatch-observability) aws eks update-kubeconfig --region "$AWS_REGION" --name hs-eks-cluster # Deploy Load balancer and Ingress From a298ec56fcc3614ba782203d251320ae58b0c9dd Mon Sep 17 00:00:00 2001 From: Prasunna Soppa Date: Tue, 28 May 2024 15:25:57 +0530 Subject: [PATCH 7/7] resolve pr comments --- install.sh | 10 ++++++++-- lib/aws/eks.ts | 5 +++-- lib/aws/log_stack.ts | 4 ++-- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/install.sh b/install.sh index 130873e..fb22941 100644 --- a/install.sh +++ b/install.sh @@ -363,7 +363,7 @@ while true; do fi done -validate_master_password() { +validate_opensearch_password() { local master_password=$1 # Check length (at least 8 characters) @@ -390,6 +390,12 @@ validate_master_password() { return 1 fi + # Check for special characters + if [[ $password == [^A-Za-z0-9] ]]; then + display_error "Error: Password should include special characters." 
+ return 1 + fi + # read password again to confirm echo "Please re-enter the password: " read -r -s master_password_confirm @@ -410,7 +416,7 @@ if [[ "$OPEN_SEARCH_SERVICE" == "y" ]]; then while true; do echo "Please enter the Master Password for Open Search Service: " read -r -s OPEN_SEARCH_MASTER_PASSWORD - if validate_master_password "$OPEN_SEARCH_MASTER_PASSWORD"; then + if validate_opensearch_password "$OPEN_SEARCH_MASTER_PASSWORD"; then break fi done diff --git a/lib/aws/eks.ts b/lib/aws/eks.ts index c13a1f2..fbc3b97 100644 --- a/lib/aws/eks.ts +++ b/lib/aws/eks.ts @@ -56,8 +56,9 @@ export class EksStack { vpc: vpc, clusterName: "hs-eks-cluster", }); - let open_search_service = scope.node.tryGetContext('open_search_service'); - if (`${open_search_service}` == "y"){ + + let push_logs = scope.node.tryGetContext('open_search_service') || 'n'; + if (`${push_logs}` == "y"){ const logsStack = new LogsStack(scope, cluster, "app-logs-s3-service-account"); } diff --git a/lib/aws/log_stack.ts b/lib/aws/log_stack.ts index fbc9667..bcc2ec0 100644 --- a/lib/aws/log_stack.ts +++ b/lib/aws/log_stack.ts @@ -182,8 +182,8 @@ export class LogsStack { }); kAnalyticsNS.node.addDependency(this.domain); - let open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name'); - let open_search_master_password = scope.node.tryGetContext('open_search_master_password'); + let open_search_master_user_name = scope.node.tryGetContext('open_search_master_user_name') || "admin"; + let open_search_master_password = scope.node.tryGetContext('open_search_master_password') || "Password@123"; const openSearchFluentdChart = cluster.addHelmChart("fluentd-opensearch", { chart: "fluentd",