
Commit

Code clean up.
zzhlogin committed Mar 11, 2024
1 parent 49139c9 commit 67d22f3
Showing 1 changed file with 0 additions and 95 deletions: .github/workflows/appsignals-python-e2e-test.yml
@@ -41,17 +41,6 @@ jobs:
ref: abf75babe672412cb63c56cbcf1c5ce2d8c97a1c
fetch-depth: 0

# - name: Download enablement script
# uses: actions/checkout@v4
# with:
# repository: aws-observability/application-signals-demo
# ref: main
# path: enablement-script
# sparse-checkout: |
# scripts/eks/appsignals/enable-app-signals.sh
# scripts/eks/appsignals/clean-app-signals.sh
# sparse-checkout-cone-mode: false

- name: Generate testing id
run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV
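
The step above builds a unique suffix from the AWS region plus the workflow's run id and run number, and appends it to the file at $GITHUB_ENV; GitHub Actions reads that file back after the step, so TESTING_ID is visible to every later step in the job. A minimal sketch of the mechanism (the second step name is illustrative, not from this workflow):

    - name: Generate testing id
      run: echo TESTING_ID="${{ env.AWS_DEFAULT_REGION }}-${{ github.run_id }}-${{ github.run_number }}" >> $GITHUB_ENV

    - name: Consume the id in a later step
      run: echo "Suffixing test resources with $TESTING_ID"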

@@ -95,26 +84,11 @@ jobs:
- name: Setup Helm
uses: azure/setup-helm@v3

# # TODO: This step is used for custom pre-release instrumentation
# # It is a temporary measure until the cw add-on is released with python support
# - name: Checkout Amazon Cloudwatch Agent Operator
# uses: actions/checkout@v4
# with:
# repository: aws/amazon-cloudwatch-agent-operator
# # SHA as of March 4
# ref: abf75babe672412cb63c56cbcf1c5ce2d8c97a1c
# path: amazon-cloudwatch-agent-operator

# TODO: This step is used for custom pre-release instrumentation
# It is a temporary measure until the cw add-on is released with python support
- name: Edit Helm values for Amazon Cloudwatch Agent Operator
working-directory: helm/
run: |
echo "HERE!!!!!!!!"
echo "pwd"
pwd
echo "ls"
ls
sed -i 's/clusterName:/clusterName: ${{ inputs.test-cluster-name }}/g' values.yaml
sed -i 's/region:/region: ${{ env.AWS_DEFAULT_REGION }}/g' values.yaml
sed -i 's/repository: cloudwatch-agent-operator/repository: cwagent-operator-pre-release/g' values.yaml
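
The three sed commands above fill placeholder keys in the chart's values.yaml before it is rendered. Assuming the file ships with the keys empty (the file itself is not part of this diff), the edit amounts to:

    # values.yaml, assumed shape before the sed edits:
    clusterName:
    region:
    repository: cloudwatch-agent-operator

    # after the sed edits:
    clusterName: <test-cluster-name input>
    region: <AWS_DEFAULT_REGION>
    repository: cwagent-operator-pre-release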
@@ -168,64 +142,18 @@ jobs:
-var="python_app_image=${{ env.TEST_ACCOUNT }}.dkr.ecr.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/${{ env.APP_SIGNALS_PYTHON_E2E_FE_SA_IMG }}:latest" \
-var="python_remote_app_image=${{ env.TEST_ACCOUNT }}.dkr.ecr.${{ env.AWS_DEFAULT_REGION }}.amazonaws.com/${{ env.APP_SIGNALS_PYTHON_E2E_RE_SA_IMG }}:latest"
# # Enable App Signals on the test cluster
# - name: Enable App Signals
# working-directory: enablement-script/scripts/eks/appsignals
# run: |
# ./enable-app-signals.sh \
# ${{ inputs.test-cluster-name }} \
# ${{ env.AWS_DEFAULT_REGION }} \
# ${{ env.SAMPLE_APP_NAMESPACE }}
#
# - name: Save CloudWatch Agent Operator image to environment before patching
# run: |
# echo "OLD_CW_AGENT_OPERATOR_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=amazon-cloudwatch-observability -o json | \
# jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV
#
# - name: Patch the CloudWatch Agent Operator image and restart CloudWatch pods
# run: |
# kubectl patch deploy -n amazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' -p '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "${{ env.ECR_OPERATOR_STAGING_IMAGE }}"}]'
# kubectl delete pods --all -n amazon-cloudwatch
# kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch
#
# # Application pods need to be restarted for the
# # app signals instrumentation to take effect
# - name: Restart the app pods
# run: kubectl delete pods --all -n ${{ env.SAMPLE_APP_NAMESPACE }}
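
The commented-out steps above recorded the operator image, patched the controller-manager deployment to the staging image, and restarted the CloudWatch pods. If that flow were revived, the patched image could be read straight from the deployment spec instead of from pod status; a hedged one-liner, reusing the deployment and namespace names from the comments:

    kubectl get deploy amazon-cloudwatch-observability-controller-manager -n amazon-cloudwatch \
      -o jsonpath='{.spec.template.spec.containers[0].image}'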

- name: Wait for sample app pods to come up
run: |
kubectl wait --for=condition=Ready pod --all -n ${{ env.SAMPLE_APP_NAMESPACE }}
echo "Installing app signals to the sample app"
pwd
ls
cat namespace.yaml
kubectl apply -f namespace.yaml
echo "helm apply"
pwd
ls
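# The next line renders the chart locally with helm template and pipes the
# manifests to kubectl. --server-side uses server-side apply, which avoids the
# client-side last-applied-configuration annotation (too large for the big
# operator CRDs), and --force-conflicts lets this apply take over any fields
# currently owned by another field manager.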
helm template amazon-cloudwatch-observability ./helm --debug --include-crds --namespace amazon-cloudwatch | kubectl apply --namespace amazon-cloudwatch --server-side --force-conflicts -f -
echo "kubectl wait "
kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch
echo "kubectl delete pods --all -n ${{ env.SAMPLE_APP_NAMESPACE }}"
kubectl delete pods --all -n ${{ env.SAMPLE_APP_NAMESPACE }}
kubectl wait --for=condition=Ready pod --all -n ${{ env.SAMPLE_APP_NAMESPACE }}
echo "2.pwd"
pwd
echo "ls"
ls
cd ${{ github.workspace }}/aws-application-signals-test-framework/terraform/python/eks
echo "3.pwd"
pwd
echo "ls"
ls
# Attach policies to cluster node group roles that are required for AppSignals
aws eks list-nodegroups --cluster-name ${{ inputs.test-cluster-name }} --region ${{ env.AWS_DEFAULT_REGION }} |\
jq -r '.nodegroups[]' |\
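
The pipeline above is truncated by the diff view; per the comment, its job is to attach the policies AppSignals needs to every node group role of the cluster. A plausible continuation, with the loop body, variables, and policy ARN as assumptions rather than this workflow's actual code:

    # hypothetical continuation -- not shown in this diff
    while read -r nodegroup; do
      role_arn=$(aws eks describe-nodegroup --cluster-name "$CLUSTER_NAME" --nodegroup-name "$nodegroup" \
        --region "$AWS_DEFAULT_REGION" --query 'nodegroup.nodeRole' --output text)
      aws iam attach-role-policy --role-name "${role_arn##*/}" \
        --policy-arn arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy
    done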
@@ -268,13 +196,6 @@ jobs:
echo "NEW_CW_AGENT_OPERATOR_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=amazon-cloudwatch-observability -o json | \
jq '.items[0].status.containerStatuses[0].image') >> $GITHUB_ENV
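
One quirk of the save step above: jq without -r prints the image string with its surrounding quotes, so the value stored in $GITHUB_ENV is quoted. That is harmless as long as the old and new values are captured the same way (as the commented comparison below assumes), but jq -r would store the bare string instead (an alternative, not the workflow's code):

    jq -r '.items[0].status.containerStatuses[0].image'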
# - name: Check if CW Agent Operator image has changed
# run: |
# if [ ${{ env.OLD_CW_AGENT_OPERATOR_IMAGE }} = ${{ env.NEW_CW_AGENT_OPERATOR_IMAGE }} ]; then
# echo "Operator image did not change"
# exit 1
# fi

- name: Get the sample app endpoint
run: |
echo "APP_ENDPOINT=$(terraform output python_app_endpoint)" >> $GITHUB_ENV
@@ -305,9 +226,6 @@ jobs:
curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/remote-service?ip=${{ env.REMOTE_SERVICE_POD_IP }}
curl -S -s -o /dev/null http://${{ env.APP_ENDPOINT }}/client-call
# - name: Build Gradle
# run: ./gradlew

# Validation for app signals telemetry data
- name: Call endpoint and validate generated EMF logs
id: log-validation
@@ -364,24 +282,11 @@ jobs:
--request-body ip=${{ env.REMOTE_SERVICE_POD_IP }}
--rollup'

# Clean up Procedures

# - name: Remove log group deletion command
# if: always()
## working-directory: enablement-script/scripts/eks/appsignals
# run: |
# delete_log_group="aws logs delete-log-group --log-group-name '${{ env.LOG_GROUP }}' --region \$REGION"
# sed -i "s#$delete_log_group##g" clean-app-signals.sh

- name: Clean Up App Signals
if: always()
continue-on-error: true
run: |
kubectl delete -f ./namespace.yaml
# ./clean-app-signals.sh \
# ${{ inputs.test-cluster-name }} \
# ${{ env.AWS_DEFAULT_REGION }} \
# ${{ env.SAMPLE_APP_NAMESPACE }}
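
The cleanup step deletes the same manifest applied earlier; removing a Namespace object cascades to every resource inside it, so this single command tears down everything in the sample app namespace. The manifest itself is not in this diff, but a minimal namespace.yaml of the shape being applied and deleted would be (the name is an assumption):

    apiVersion: v1
    kind: Namespace
    metadata:
      name: sample-app-namespace   # hypothetical; the workflow takes it from env.SAMPLE_APP_NAMESPACE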
# This step also deletes lingering resources from previous test runs
- name: Delete all sample app resources
