diff --git a/contribute/developer-guide/templates/experiment_k8s.tmpl b/contribute/developer-guide/templates/experiment_k8s.tmpl
index 4b776853a..4bd36801d 100644
--- a/contribute/developer-guide/templates/experiment_k8s.tmpl
+++ b/contribute/developer-guide/templates/experiment_k8s.tmpl
@@ -76,7 +76,7 @@ func Experiment(clients clients.ClientSets){
 
     // POD STATUS CHECKS FOR THE APPLICATION UNDER TEST AND AUXILIARY APPLICATIONS ARE ADDED BY DEFAULT
     //PRE-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -152,7 +152,7 @@ func Experiment(clients clients.ClientSets){
 
     // POD STATUS CHECKS FOR THE APPLICATION UNDER TEST AND AUXILIARY APPLICATIONS ARE ADDED BY DEFAULT
     //POST-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
index 5cbdb6c02..c4dc9f9a5 100644
--- a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
+++ b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
@@ -14,7 +14,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
@@ -74,26 +73,6 @@ func AWSSSMChaosByID(clients clients.ClientSets) {
         "Sequence":       experimentsDetails.Sequence,
     })
 
-    //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
-        }
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -125,14 +104,16 @@ func AWSSSMChaosByID(clients clients.ClientSets) {
         return
     }
 
-    //Verify the aws ec2 instance is running (pre chaos)
-    if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
-        log.Errorf("failed to get the ec2 instance status, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+    if chaosDetails.DefaultHealthCheck {
+        //Verify the aws ec2 instance is running (pre chaos)
+        if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
+            log.Errorf("failed to get the ec2 instance status, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
+        log.Info("[Status]: EC2 instance is in running state")
     }
-    log.Info("[Status]: EC2 instance is in running state")
 
     // Including the litmus lib for aws-ssm-chaos-by-id
     switch experimentsDetails.ChaosLib {
@@ -160,33 +141,16 @@ func AWSSSMChaosByID(clients clients.ClientSets) {
     log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
-    //Verify the aws ec2 instance is running (post chaos)
-    if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
-        log.Errorf("failed to get the ec2 instance status, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-    log.Info("[Status]: EC2 instance is in running state (post chaos)")
-
-    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+        //Verify the aws ec2 instance is running (post chaos)
+        if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
+            log.Errorf("failed to get the ec2 instance status, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+        log.Info("[Status]: EC2 instance is in running state (post chaos)")
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
index ed3049831..5f0b6359a 100644
--- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
+++ b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
@@ -14,7 +14,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
@@ -74,34 +73,16 @@ func AWSSSMChaosByTag(clients clients.ClientSets) {
         "Sequence":       experimentsDetails.Sequence,
     })
 
-    //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        //Verify that the instance should have permission to perform ssm api calls
+        if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil {
+            log.Errorf("target instance status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
     }
 
-    //Verify that the instance should have permission to perform ssm api calls
-    if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil {
-        log.Errorf("target instance status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -151,33 +132,15 @@ func AWSSSMChaosByTag(clients clients.ClientSets) {
     log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
-    //Verify the aws ec2 instance is running (post chaos)
-    if err := ec2.InstanceStatusCheckByTag(experimentsDetails.EC2InstanceTag, experimentsDetails.Region); err != nil {
-        log.Errorf("failed to get the ec2 instance status, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-    log.Info("[Status]: EC2 instance is in running state (post chaos)")
-
-    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        //Verify the aws ec2 instance is running (post chaos)
+        if err := ec2.InstanceStatusCheckByTag(experimentsDetails.EC2InstanceTag, experimentsDetails.Region); err != nil {
+            log.Errorf("failed to get the ec2 instance status, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+        log.Info("[Status]: EC2 instance is in running state (post chaos)")
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/cassandra/pod-delete/experiment/pod-delete.go b/experiments/cassandra/pod-delete/experiment/pod-delete.go
index 3caf3a081..128bc6ee9 100644
--- a/experiments/cassandra/pod-delete/experiment/pod-delete.go
+++ b/experiments/cassandra/pod-delete/experiment/pod-delete.go
@@ -77,7 +77,7 @@ func CasssandraPodDelete(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ChaoslibDetail.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
         if err = status.AUTStatusCheck(experimentsDetails.ChaoslibDetail.AppNS, experimentsDetails.ChaoslibDetail.AppLabel, experimentsDetails.ChaoslibDetail.TargetContainer, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -100,7 +100,7 @@ func CasssandraPodDelete(clients clients.ClientSets) {
 
     if experimentsDetails.ChaoslibDetail.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the pre-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
@@ -108,13 +108,13 @@ func CasssandraPodDelete(clients clients.ClientSets) {
             if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
                 log.Errorf("Probes Failed, err: %v", err)
                 failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
         // generating the events for the pre-chaos check
         types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -155,7 +155,7 @@ func CasssandraPodDelete(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
         if err = status.AUTStatusCheck(experimentsDetails.ChaoslibDetail.AppNS, experimentsDetails.ChaoslibDetail.AppLabel, experimentsDetails.ChaoslibDetail.TargetContainer, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -178,20 +178,20 @@ func CasssandraPodDelete(clients clients.ClientSets) {
 
     if experimentsDetails.ChaoslibDetail.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the post-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
             if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
                 log.Errorf("Probes Failed, err: %v", err)
                 failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
 
         // generating post chaos event
diff --git a/experiments/generic/container-kill/experiment/container-kill.go b/experiments/generic/container-kill/experiment/container-kill.go
index c4e54641a..c8e1fb5e3 100644
--- a/experiments/generic/container-kill/experiment/container-kill.go
+++ b/experiments/generic/container-kill/experiment/container-kill.go
@@ -75,7 +75,7 @@ func ContainerKill(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -89,7 +89,7 @@ func ContainerKill(clients clients.ClientSets) {
 
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the pre-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
@@ -97,13 +97,13 @@ func ContainerKill(clients clients.ClientSets) {
             if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
                 log.Errorf("Probes Failed, err: %v", err)
                 failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
         // generating the events for the pre-chaos check
         types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -137,7 +137,7 @@ func ContainerKill(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -151,20 +151,20 @@ func ContainerKill(clients clients.ClientSets) {
 
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the post-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
             if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
                 log.Errorf("Probe Failed, err: %v", err)
                 failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
 
         // generating post chaos event
diff --git a/experiments/generic/disk-fill/experiment/disk-fill.go b/experiments/generic/disk-fill/experiment/disk-fill.go
index b5b8f5d75..6b97c0afc 100644
--- a/experiments/generic/disk-fill/experiment/disk-fill.go
+++ b/experiments/generic/disk-fill/experiment/disk-fill.go
@@ -73,7 +73,7 @@ func DiskFill(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -98,7 +98,7 @@ func DiskFill(clients clients.ClientSets) {
 
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the pre-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
@@ -106,13 +106,13 @@ func DiskFill(clients clients.ClientSets) {
             if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
                 log.Errorf("Probe Failed, err: %v", err)
                 failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
         // generating the events for the pre-chaos check
         types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -139,7 +139,7 @@ func DiskFill(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -164,20 +164,20 @@ func DiskFill(clients clients.ClientSets) {
 
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
-        msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+        msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
         // run the probes in the post-chaos check
         if len(resultDetails.ProbeDetails) != 0 {
             if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
                 log.Errorf("Probes Failed, err: %v", err)
                 failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-                msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+                msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
                 types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
                 events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
                 result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
                 return
             }
-            common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+            msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
         }
 
         // generating post chaos event
diff --git a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go
index ef706ae73..dea47f990 100644
--- a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go
+++ b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go
@@ -72,34 +72,36 @@ func DockerServiceKill(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Errorf("Target nodes are not in the ready state, err: %v", err)
-        failStep := "[pre-chaos]: Failed to check the status of nodes, err: " + err.Error()
-        types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+        //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Errorf("Target nodes are not in the ready state, err: %v", err)
+            failStep := "[pre-chaos]: Failed to check the status of nodes, err: " + err.Error()
+            types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
     }
 
     if experimentsDetails.EngineName != "" {
@@ -145,31 +147,33 @@ func DockerServiceKill(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-        types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+            types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        }
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
index e41a60321..42de98e8b 100644
--- a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
+++ b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
@@ -72,34 +72,36 @@ func KubeletServiceKill(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Errorf("Target nodes are not in the ready state, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-        types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+        //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Errorf("Target nodes are not in the ready state, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+            types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
     }
 
     if experimentsDetails.EngineName != "" {
@@ -145,31 +147,33 @@ func KubeletServiceKill(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-        types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+            types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        }
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
index 3943aa2fc..3429f5892 100644
--- a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
+++ b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
@@ -73,34 +73,36 @@ func NodeCPUHog(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Errorf("Target nodes are not in the ready state, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-        types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+        //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Errorf("Target nodes are not in the ready state, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+            types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
     }
 
     if experimentsDetails.EngineName != "" {
@@ -146,30 +148,32 @@ func NodeCPUHog(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Infof("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Infof("Application status check failed, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-        types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+
+        //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+            types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        }
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/node-drain/experiment/node-drain.go b/experiments/generic/node-drain/experiment/node-drain.go
index e4a36e25e..833fdde11 100644
--- a/experiments/generic/node-drain/experiment/node-drain.go
+++ b/experiments/generic/node-drain/experiment/node-drain.go
@@ -72,34 +72,36 @@ func NodeDrain(clients clients.ClientSets) {
     go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Errorf("Target nodes are not in the ready state, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-        types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+        //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Errorf("Target nodes are not in the ready state, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+            types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
     }
 
     if experimentsDetails.EngineName != "" {
@@ -145,31 +147,33 @@ func NodeDrain(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
            return
        }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-        types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+            types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+        }
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/node-io-stress/experiment/node-io-stress.go b/experiments/generic/node-io-stress/experiment/node-io-stress.go
index dce1d7bc8..de1f9b7d3 100644
--- a/experiments/generic/node-io-stress/experiment/node-io-stress.go
+++ b/experiments/generic/node-io-stress/experiment/node-io-stress.go
@@ -75,34 +75,36 @@ func NodeIOStress(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Errorf("Application status check failed, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    // Checking the status of target nodes
-    log.Info("[Status]: Getting the status of target nodes")
-    if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-        log.Errorf("Target nodes are not in the ready state, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-        types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-        events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
+        //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
+
+        // Checking the status of target nodes
+        log.Info("[Status]: Getting the status of target nodes")
+        if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+            log.Errorf("Target nodes are not in the ready state, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+            types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+            events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+            return
+        }
     }
 
     if experimentsDetails.EngineName != "" {
@@ -148,23 +150,25 @@ func NodeIOStress(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Infof("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+        log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+        if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+            log.Infof("Application status check failed, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+
+        //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+        if experimentsDetails.AuxiliaryAppInfo != "" {
+            log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+            if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+                log.Errorf("Auxiliary Application status check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
     }
 
     // Checking the status of target nodes
diff --git a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go
index 9764d8767..307d3d631 100644
--- a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go
+++ b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go
@@ -74,34 +74,36 @@ func NodeMemoryHog(clients clients.ClientSets) {
     go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil
-		log.Errorf("Application status check failed, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Errorf("Application status check failed, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Errorf("Target nodes are not in the ready state, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
+		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Errorf("Target nodes are not in the ready state, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+			return
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
@@ -147,31 +149,33 @@ func NodeMemoryHog(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-	if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-		log.Infof("Application status check failed, err: %v", err)
-		failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Infof("Application status check failed, err: %v", err)
+			failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/node-restart/experiment/node-restart.go b/experiments/generic/node-restart/experiment/node-restart.go
index f89abf5b3..ecd78b7fb 100644
--- a/experiments/generic/node-restart/experiment/node-restart.go
+++ b/experiments/generic/node-restart/experiment/node-restart.go
@@ -67,34 +67,36 @@ func NodeRestart(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-	if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-		log.Errorf("Application status check failed, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Errorf("Application status check failed, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Errorf("Target nodes are not in the ready state, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
+		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Errorf("Target nodes are not in the ready state, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+			return
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
@@ -140,31 +142,33 @@ func NodeRestart(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-	if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-		log.Infof("Application status check failed, err: %v", err)
-		failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Infof("Application status check failed, err: %v", err)
+			failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/node-taint/experiment/node-taint.go b/experiments/generic/node-taint/experiment/node-taint.go
index 3d920ebd2..39f89de8e 100644
--- a/experiments/generic/node-taint/experiment/node-taint.go
+++ b/experiments/generic/node-taint/experiment/node-taint.go
@@ -73,34 +73,36 @@ func NodeTaint(clients clients.ClientSets) {
 	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-	if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-		log.Errorf("Application status check failed, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Errorf("Application status check failed, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Errorf("Target nodes are not in the ready state, err: %v", err)
-		failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
+		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Errorf("Target nodes are not in the ready state, err: %v", err)
+			failStep := "[pre-chaos]: Failed to verify the status of nodes, err: " + err.Error()
+			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+			return
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
@@ -145,31 +147,33 @@ func NodeTaint(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-	if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-		log.Errorf("Application status check failed, err: %v", err)
-		failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-		return
-	}
-
-	//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+	if chaosDetails.DefaultHealthCheck {
+		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
+		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
+			log.Errorf("Application status check failed, err: %v", err)
+			failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
 			result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 			return
 		}
-	}
-	// Checking the status of target nodes
-	log.Info("[Status]: Getting the status of target nodes")
-	if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
+		if experimentsDetails.AuxiliaryAppInfo != "" {
+			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
+			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+				log.Errorf("Auxiliary Application status check failed, err: %v", err)
+				failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+				return
+			}
+		}
+
+		// Checking the status of target nodes
+		log.Info("[Status]: Getting the status of target nodes")
+		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
+			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
+			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
+			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
+		}
 	}
 
 	if experimentsDetails.EngineName != "" {
diff --git a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
index 2dbe644a2..33722fcd9 100644
--- a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
+++ b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
@@ -74,7 +74,7 @@ func PodAutoscaler(clients clients.ClientSets) {
 	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -88,7 +88,7 @@ func PodAutoscaler(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -96,13 +96,13 @@ func PodAutoscaler(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -129,7 +129,7 @@ func PodAutoscaler(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -144,20 +144,20 @@ func PodAutoscaler(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
index dc78f781e..d1b69f203 100644
--- a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
+++ b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
@@ -73,7 +73,7 @@ func PodCPUHogExec(clients clients.ClientSets) {
 	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodCPUHogExec(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodCPUHogExec(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -128,7 +128,7 @@ func PodCPUHogExec(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -142,20 +142,20 @@ func PodCPUHogExec(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
index 66891fe66..c25e5cf61 100644
--- a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
+++ b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
@@ -73,7 +73,7 @@ func PodCPUHog(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodCPUHog(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodCPUHog(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -135,7 +135,7 @@ func PodCPUHog(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -149,20 +149,20 @@ func PodCPUHog(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-delete/experiment/pod-delete.go b/experiments/generic/pod-delete/experiment/pod-delete.go
index 2dc8e993e..70ed93dd2 100644
--- a/experiments/generic/pod-delete/experiment/pod-delete.go
+++ b/experiments/generic/pod-delete/experiment/pod-delete.go
@@ -73,7 +73,7 @@ func PodDelete(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodDelete(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodDelete(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -135,7 +135,7 @@ func PodDelete(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -149,20 +149,20 @@ func PodDelete(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go
index 009107576..ee2971f82 100644
--- a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go
+++ b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go
@@ -73,7 +73,7 @@ func PodDNSError(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -87,7 +87,7 @@ func PodDNSError(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -95,13 +95,13 @@ func PodDNSError(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -128,7 +128,7 @@ func PodDNSError(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -142,20 +142,20 @@ func PodDNSError(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = 
common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go index 7786588d7..e059ce81d 100644 --- a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go +++ b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go @@ -74,7 +74,7 @@ func PodDNSSpoof(clients clients.ClientSets) { go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -88,7 +88,7 @@ func PodDNSSpoof(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -97,13 +97,13 @@ func PodDNSSpoof(clients clients.ClientSets) { if err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -130,7 +130,7 @@ func PodDNSSpoof(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -144,20 +144,20 @@ func PodDNSSpoof(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := 
common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go index 4013bee7a..438f3b549 100644 --- a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go +++ b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go @@ -72,7 +72,7 @@ func PodFioStress(clients clients.ClientSets) { go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -86,7 +86,7 @@ func PodFioStress(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -94,13 +94,13 @@ func PodFioStress(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check 
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -127,7 +127,7 @@ func PodFioStress(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -141,20 +141,20 @@ func PodFioStress(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go
index cf4e6c4bc..8381d6345 100644
--- a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go
+++ b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go
@@ -73,7 +73,7 @@ func PodHttpLatency(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodHttpLatency(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodHttpLatency(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -127,7 +127,7 @@ func PodHttpLatency(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -141,20 +141,20 @@ func PodHttpLatency(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
index 27ab3bc9f..4865faab1 100644
--- a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
+++ b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
@@ -73,7 +73,7 @@ func PodHttpModifyBody(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -128,7 +128,7 @@ func PodHttpModifyBody(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
diff --git a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
index dbcfe3ca0..9bb640017 100644
--- a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
+++ b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
@@ -73,7 +73,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodHttpModifyHeader(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -127,7 +127,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -141,20 +141,20 @@ func PodHttpModifyHeader(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
index cb93b2712..e864d5482 100644
--- a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
+++ b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
@@ -72,7 +72,7 @@ func PodHttpResetPeer(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -127,7 +127,7 @@ func PodHttpResetPeer(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
diff --git a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
index 3dea766b4..8e5f557b6 100644
--- a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
+++ b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
@@ -82,7 +82,7 @@ func PodHttpStatusCode(clients clients.ClientSets) {
 	}
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -137,7 +137,7 @@ func PodHttpStatusCode(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
diff --git a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go
index d872eee37..59ed72794 100644
--- a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go
+++ b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go
@@ -73,7 +73,7 @@ func PodIOStress(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodIOStress(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodIOStress(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -135,7 +135,7 @@ func PodIOStress(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -149,20 +149,20 @@ func PodIOStress(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
index 522790f20..cacacdc5a 100644
--- a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
+++ b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
@@ -73,7 +73,7 @@ func PodMemoryHogExec(clients clients.ClientSets) {
 	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed, err: %v", err)
@@ -87,7 +87,7 @@ func PodMemoryHogExec(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -95,13 +95,13 @@ func PodMemoryHogExec(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -128,7 +128,7 @@ func PodMemoryHogExec(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Infof("Application status check failed, err: %v", err)
@@ -142,20 +142,20 @@ func PodMemoryHogExec(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the post-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
 				log.Errorf("Probes Failed, err: %v", err)
 				failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 
 		// generating post chaos event
diff --git a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
index 6ee939bd6..1ac8eaded 100644
--- a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
+++ b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
@@ -74,7 +74,7 @@ func PodMemoryHog(clients clients.ClientSets) {
 	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
 	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
 			log.Errorf("Application status check failed,, err: %v", err)
@@ -88,7 +88,7 @@ func PodMemoryHog(clients clients.ClientSets) {
 
 	if experimentsDetails.EngineName != "" {
 		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "")
+		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
 
 		// run the probes in the pre-chaos check
 		if len(resultDetails.ProbeDetails) != 0 {
@@ -96,13 +96,13 @@ func PodMemoryHog(clients clients.ClientSets) {
 			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
 				log.Errorf("Probe Failed, err: %v", err)
 				failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
-				msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful")
+				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
 				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
 				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
 				result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
 				return
 			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful")
+			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
 		}
 		// generating the events for the pre-chaos check
 		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
@@ -136,7 +136,7 @@ func PodMemoryHog(clients clients.ClientSets) {
 	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
 	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultAppHealthCheck {
+	if chaosDetails.DefaultHealthCheck {
 		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
 		if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
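// ---- editorial note (not part of the patch; the pod-memory-hog hunk continues below) ----
// None of these hunks shows where chaosDetails.DefaultHealthCheck is populated.
// Given the project's env-driven configuration, a DEFAULT_HEALTH_CHECK variable parsed
// during chaos-details initialisation is the natural source; the variable name and
// placement below are assumptions, not something this diff confirms.
package types // hypothetical placement, mirroring pkg/types

import (
	"os"
	"strconv"
)

// defaultHealthCheck reports whether the default pre/post-chaos health checks
// should run. It fails open (true) when the variable is unset or malformed,
// preserving the historical always-check behaviour.
func defaultHealthCheck() bool {
	value, err := strconv.ParseBool(os.Getenv("DEFAULT_HEALTH_CHECK"))
	if err != nil {
		return true
	}
	return value
}
// ---- end editorial note ------------------------------------------------------------------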
log.Infof("Application status check failed, err: %v", err) @@ -150,20 +150,20 @@ func PodMemoryHog(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go index cd0011cd1..e21f7e60b 100644 --- a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go +++ b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go @@ -75,7 +75,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -89,7 +89,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -97,13 +97,13 @@ func PodNetworkCorruption(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", 
&chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -137,7 +137,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) @@ -151,20 +151,20 @@ func PodNetworkCorruption(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go index 82ab15839..453e41885 100644 --- a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go +++ b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go @@ -75,7 +75,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { 
log.Errorf("Application status check failed, err: %v", err) @@ -89,7 +89,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -97,13 +97,13 @@ func PodNetworkDuplication(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -137,7 +137,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) @@ -151,20 +151,20 @@ func PodNetworkDuplication(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = 
common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go index 771dc8fec..706dd488b 100644 --- a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go +++ b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go @@ -75,7 +75,7 @@ func PodNetworkLatency(clients clients.ClientSets) { go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -89,20 +89,20 @@ func PodNetworkLatency(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event @@ -137,7 +137,7 @@ func PodNetworkLatency(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) @@ -151,20 +151,20 @@ func PodNetworkLatency(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos 
check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go index e8e2e0e2e..dbbf453e8 100644 --- a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go +++ b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go @@ -75,7 +75,7 @@ func PodNetworkLoss(clients clients.ClientSets) { go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -89,7 +89,7 @@ func PodNetworkLoss(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -97,13 +97,13 @@ func PodNetworkLoss(clients clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -137,7 
+137,7 @@ func PodNetworkLoss(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Infof("Application status check failed, err: %v", err) @@ -151,20 +151,20 @@ func PodNetworkLoss(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go index 5beb16fe3..2f4859ca1 100644 --- a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go +++ b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go @@ -72,7 +72,7 @@ func PodNetworkPartition(clients clients.ClientSets) { go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) //PRE-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -86,7 +86,7 @@ func PodNetworkPartition(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { @@ -94,13 +94,13 @@ func PodNetworkPartition(clients 
clients.ClientSets) { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating the events for the pre-chaos check types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) @@ -127,7 +127,7 @@ func PodNetworkPartition(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed //POST-CHAOS APPLICATION STATUS CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { log.Errorf("Application status check failed, err: %v", err) @@ -141,20 +141,20 @@ func PodNetworkPartition(clients clients.ClientSets) { if experimentsDetails.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go index 280cdab04..8f6312bba 100644 --- a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go +++ b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go @@ -72,7 +72,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { // PRE-CHAOS 
APPLICATION STATUS CHECK // KAFKA CLUSTER HEALTH CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the Kafka cluster is healthy(pre-chaos)") if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil { log.Errorf("Cluster health check failed, err: %v", err) @@ -147,7 +147,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { resultDetails.Verdict = v1alpha1.ResultVerdictPassed // POST-CHAOS KAFKA CLUSTER HEALTH CHECK - if chaosDetails.DefaultAppHealthCheck { + if chaosDetails.DefaultHealthCheck { log.Info("[Status]: Verify that the Kafka cluster is healthy(post-chaos)") if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil { log.Errorf("Cluster health check failed, err: %v", err) @@ -161,20 +161,20 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { if experimentsDetails.ChaoslibDetail.EngineName != "" { // marking AUT as running, as we already checked the status of application under test - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "") // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) failStep := "[post-chaos]: Failed while running probes, err: " + err.Error() - msg := common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Unsuccessful") + msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) return } - msg = common.GetStatusMessage(chaosDetails.DefaultAppHealthCheck, "AUT: Running", "Successful") + msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful") } // generating post chaos event diff --git a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go index 303b0b5f1..422e1f437 100644 --- a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go +++ b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go @@ -13,7 +13,6 @@ import ( "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/result" - "github.com/litmuschaos/litmus-go/pkg/status" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/sirupsen/logrus" @@ -74,15 +73,6 @@ func EBSLossByID(clients clients.ClientSets) { "Sequence": experimentsDetails.Sequence, }) - //PRE-CHAOS APPLICATION STATUS CHECK - log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") - if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil { - log.Errorf("Application status check failed, err: %v", err) - failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error() - 
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -106,25 +96,17 @@ func EBSLossByID(clients clients.ClientSets) {
         events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
     }
 
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+
+        //Verify the aws ec2 instance is attached to ebs volume
+        if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
+            log.Errorf("volume status check failed pre chaos, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify if the ebs volume is attached to ec2 instance, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
     }
 
-    //Verify the aws ec2 instance is attached to ebs volume
-    if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
-        log.Errorf("volume status check failed pre chaos, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify if the ebs volume is attached to ec2 instance, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
     // Including the litmus lib for ebs-loss
     switch experimentsDetails.ChaosLib {
     case "litmus":
@@ -145,33 +127,17 @@ func EBSLossByID(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+        //Verify the aws ec2 instance is attached to ebs volume
+        if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
+            log.Errorf("volume status check failed post chaos, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an ec2 instance, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
     }
 
-    //Verify the aws ec2 instance is attached to ebs volume
-    if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
-        log.Errorf("volume status check failed post chaos, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an ec2 instance, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
diff --git a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
index 304678fba..0a91ac315 100644
--- a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
+++ b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
@@ -13,7 +13,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
@@ -73,24 +72,6 @@ func EBSLossByTag(clients clients.ClientSets) {
         "Sequence": experimentsDetails.Sequence,
     })
 
-    //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //selecting the target volumes (pre chaos)
-    //if no volumes found in attached state then this check will fail
-    if err := aws.SetTargetVolumeIDs(&experimentsDetails); err != nil {
-        log.Errorf("failed to set the volumes under chaos, err: %v", err)
-        failStep := "[pre-chaos]: Failed to select the target EBS volumes from tag, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -114,12 +95,13 @@ func EBSLossByTag(clients clients.ClientSets) {
         events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
     }
 
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+    if chaosDetails.DefaultHealthCheck {
+
+        //selecting the target volumes (pre chaos)
+        //if no volumes found in attached state then this check will fail
+        if err := aws.SetTargetVolumeIDs(&experimentsDetails); err != nil {
+            log.Errorf("failed to set the volumes under chaos, err: %v", err)
+            failStep := "[pre-chaos]: Failed to select the target EBS volumes from tag, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
@@ -144,32 +126,16 @@ func EBSLossByTag(clients clients.ClientSets) {
     log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
-    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+        //Verify the aws ec2 instance is attached to ebs volume
+        if err := aws.PostChaosVolumeStatusCheck(&experimentsDetails); err != nil {
+            log.Errorf("failed to verify the ebs volume is attached to an instance, err: %v", err)
+            failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an instance, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
-    }
 
-    //Verify the aws ec2 instance is attached to ebs volume
-    if err := aws.PostChaosVolumeStatusCheck(&experimentsDetails); err != nil {
-        log.Errorf("failed to verify the ebs volume is attached to an instance, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify if the ebs volume is attached to an instance, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
     }
 
     if experimentsDetails.EngineName != "" {
diff --git a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
index 85699f2be..87aa2c3f8 100644
--- a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
+++ b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
@@ -14,7 +14,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
@@ -79,26 +78,6 @@ func EC2TerminateByID(clients clients.ClientSets) {
     // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
     go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
-    //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
-        }
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -122,26 +101,29 @@ func EC2TerminateByID(clients clients.ClientSets) {
         events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
     }
 
-    //Verify the aws ec2 instance is running (pre chaos)
-    log.Info("[Status]: Verify that the aws ec2 instances are in running state (pre-chaos)")
-    if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
-        log.Errorf("failed to get the ec2 instance status, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-    log.Info("[Status]: EC2 instance is in running state")
+    if chaosDetails.DefaultHealthCheck {
 
-    //PRE-CHAOS NODE STATUS CHECK
-    if experimentsDetails.ManagedNodegroup == "enable" {
-        log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
-        activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(strings.Split(experimentsDetails.Ec2InstanceID, ","), experimentsDetails.Region)
-        if err != nil {
-            log.Errorf("Pre chaos node status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running (pre-chaos), err: " + err.Error()
+        //Verify the aws ec2 instance is running (pre chaos)
+        log.Info("[Status]: Verify that the aws ec2 instances are in running state (pre-chaos)")
+        if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
+            log.Errorf("failed to get the ec2 instance status, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+        log.Info("[Status]: EC2 instance is in running state")
+
+        //PRE-CHAOS NODE STATUS CHECK
+        if experimentsDetails.ManagedNodegroup == "enable" {
+            log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
+            activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(strings.Split(experimentsDetails.Ec2InstanceID, ","), experimentsDetails.Region)
+            if err != nil {
+                log.Errorf("Pre chaos node status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running (pre-chaos), err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
     }
 
     // Including the litmus lib for ec2-terminate
@@ -163,46 +145,29 @@ func EC2TerminateByID(clients clients.ClientSets) {
     log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
-    // POST-CHAOS ACTIVE NODE COUNT TEST
-    if experimentsDetails.ManagedNodegroup == "enable" {
-        log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
-        if err := aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
-            log.Errorf("Post chaos active node count check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
-        }
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //Verify the aws ec2 instance is running (post chaos)
-    if experimentsDetails.ManagedNodegroup != "enable" {
-        log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
-        if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
-            log.Errorf("failed to get the ec2 instance status, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
+        // POST-CHAOS ACTIVE NODE COUNT TEST
+        if experimentsDetails.ManagedNodegroup == "enable" {
+            log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
+            if err := aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
+                log.Errorf("Post chaos active node count check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
         }
-        log.Info("[Status]: EC2 instance is in running state (post chaos)")
-    }
 
-    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
+        //Verify the aws ec2 instance is running (post chaos)
+        if experimentsDetails.ManagedNodegroup != "enable" {
+            log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
+            if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
+                log.Errorf("failed to get the ec2 instance status, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+            log.Info("[Status]: EC2 instance is in running state (post chaos)")
        }
     }
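Note on the pre/post node-count hunks above: they form a bracket, recording a baseline while the node group is healthy and then requiring the group to return to that baseline after chaos. Gating both halves behind the same DefaultHealthCheck flag keeps the bracket consistent: if the baseline capture is skipped, the post-chaos comparison is skipped too. A minimal, self-contained sketch of the pattern follows; countActiveNodes, the group name, and the retry values are hypothetical stand-ins, not the litmus-go aws helpers.

package main

import (
	"fmt"
	"time"
)

// countActiveNodes stands in for a cloud API call such as a node-count check;
// it just returns a canned value here so the sketch runs anywhere.
func countActiveNodes(asg string) int { return 3 }

func main() {
	asg := "demo-asg" // hypothetical autoscaling group name

	// pre-chaos: record the baseline while the group is healthy
	baseline := countActiveNodes(asg)
	fmt.Printf("[Status]: %d active nodes in %s (pre-chaos)\n", baseline, asg)

	// ... chaos would be injected here ...

	// post-chaos: poll until the group recovers the baseline or retries run out
	for retries := 0; retries < 5; retries++ {
		if countActiveNodes(asg) == baseline {
			fmt.Println("[Status]: node count restored (post-chaos)")
			return
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Println("[Status]: node count did not recover; fail the experiment")
}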
diff --git a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
index cfcb4e151..2b9ad0586 100644
--- a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
+++ b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
@@ -13,7 +13,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
@@ -79,26 +78,6 @@ func EC2TerminateByTag(clients clients.ClientSets) {
     // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
     go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
 
-    //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
-        }
-    }
-
     if experimentsDetails.EngineName != "" {
         // marking AUT as running, as we already checked the status of application under test
         msg := "AUT: Running"
@@ -122,24 +101,27 @@ func EC2TerminateByTag(clients clients.ClientSets) {
         events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
     }
 
-    //selecting the target instance (pre chaos)
-    if err = litmusLIB.SetTargetInstance(&experimentsDetails); err != nil {
-        log.Errorf("failed to get the target ec2 instance, err: %v", err)
-        failStep := "[pre-chaos]: Failed to select the target AWS ec2 instance from tag, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //PRE-CHAOS NODE STATUS CHECK
-    if experimentsDetails.ManagedNodegroup == "enable" {
-        log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
-        activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region)
-        if err != nil {
-            log.Errorf("Pre chaos node status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running, err: " + err.Error()
+        //selecting the target instance (pre chaos)
+        if err = litmusLIB.SetTargetInstance(&experimentsDetails); err != nil {
+            log.Errorf("failed to get the target ec2 instance, err: %v", err)
+            failStep := "[pre-chaos]: Failed to select the target AWS ec2 instance from tag, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+
+        //PRE-CHAOS NODE STATUS CHECK
+        if experimentsDetails.ManagedNodegroup == "enable" {
+            log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
+            activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region)
+            if err != nil {
+                log.Errorf("Pre chaos node status check failed, err: %v", err)
+                failStep := "[pre-chaos]: Failed to verify that the NUT (Node Under Test) is running, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+        }
     }
 
     // Including the litmus lib for ec2-terminate
@@ -161,46 +143,28 @@ func EC2TerminateByTag(clients clients.ClientSets) {
     log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
-    // POST-CHAOS ACTIVE NODE COUNT TEST
-    if experimentsDetails.ManagedNodegroup == "enable" {
-        log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
-        if err = aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
-            log.Errorf("Post chaos active node count check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
-        }
-    }
-
-    //Verify the aws ec2 instance is running (post chaos)
-    if experimentsDetails.ManagedNodegroup != "enable" {
-        log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
-        if err = aws.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil {
-            log.Errorf("failed to get the ec2 instance status as running post chaos, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
+    if chaosDetails.DefaultHealthCheck {
+        // POST-CHAOS ACTIVE NODE COUNT TEST
+        if experimentsDetails.ManagedNodegroup == "enable" {
+            log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
+            if err = aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
+                log.Errorf("Post chaos active node count check failed, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify the active number of nodes, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
         }
-        log.Info("[Status]: EC2 instance is in running state (post chaos)")
-    }
 
-    //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
-            result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-            return
+        //Verify the aws ec2 instance is running (post chaos)
+        if experimentsDetails.ManagedNodegroup != "enable" {
+            log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
+            if err = aws.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil {
+                log.Errorf("failed to get the ec2 instance status as running post chaos, err: %v", err)
+                failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
+                result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
+                return
+            }
+            log.Info("[Status]: EC2 instance is in running state (post chaos)")
         }
     }
diff --git a/experiments/spring-boot/spring-boot-chaos/experiment/spring-boot-chaos.go b/experiments/spring-boot/spring-boot-chaos/experiment/spring-boot-chaos.go
index f0cb0c90a..50e0f6219 100644
--- a/experiments/spring-boot/spring-boot-chaos/experiment/spring-boot-chaos.go
+++ b/experiments/spring-boot/spring-boot-chaos/experiment/spring-boot-chaos.go
@@ -1,6 +1,8 @@
 package experiment
 
 import (
+    "os"
+
     "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
     litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/spring-boot-chaos/lib"
     "github.com/litmuschaos/litmus-go/pkg/clients"
@@ -14,7 +16,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     "github.com/sirupsen/logrus"
-    "os"
 )
 
 // Experiment contains steps to inject chaos
@@ -98,7 +99,7 @@ func Experiment(clients clients.ClientSets) {
     }
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
@@ -152,7 +153,7 @@ func Experiment(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     // POST-CHAOS APPLICATION STATUS CHECK
-    if chaosDetails.DefaultAppHealthCheck {
+    if chaosDetails.DefaultHealthCheck {
         log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
         if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
             log.Errorf("Application status check failed, err: %v", err)
diff --git a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
index 80a9d65f4..fd5c1b4ec 100644
--- a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
+++ b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
@@ -11,7 +11,6 @@ import (
     "github.com/litmuschaos/litmus-go/pkg/log"
     "github.com/litmuschaos/litmus-go/pkg/probe"
     "github.com/litmuschaos/litmus-go/pkg/result"
-    "github.com/litmuschaos/litmus-go/pkg/status"
     "github.com/litmuschaos/litmus-go/pkg/types"
     "github.com/litmuschaos/litmus-go/pkg/utils/common"
     experimentEnv "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/environment"
@@ -83,34 +82,18 @@ func VMPoweroff(clients clients.ClientSets) {
     }
 
     //PRE-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-        if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
+        // PRE-CHAOS VM STATUS CHECK
+        if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
+            log.Errorf("Failed to get the VM status, err: %v", err)
+            failStep := "[pre-chaos]: Failed to verify the VM status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+        log.Info("[Verification]: VMs are in running state (pre-chaos)")
     }
 
-    // PRE-CHAOS VM STATUS CHECK
-    if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
-        log.Errorf("Failed to get the VM status, err: %v", err)
-        failStep := "[pre-chaos]: Failed to verify the VM status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-    log.Info("[Verification]: VMs are in running state (pre-chaos)")
-
     if experimentsDetails.EngineName != "" {
         // marking IUT as running, as we already checked the status of instance under test
         msg := "IUT: Running"
@@ -154,35 +137,19 @@ func VMPoweroff(clients clients.ClientSets) {
     resultDetails.Verdict = v1alpha1.ResultVerdictPassed
 
     //POST-CHAOS APPLICATION STATUS CHECK
-    log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-    if err = status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
-        log.Errorf("Application status check failed, err: %v", err)
-        failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
+    if chaosDetails.DefaultHealthCheck {
 
-    //POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-    if experimentsDetails.AuxiliaryAppInfo != "" {
-        log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-        if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-            log.Errorf("Auxiliary Application status check failed, err: %v", err)
-            failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
+        //POST-CHAOS VM STATUS CHECK
+        log.Info("[Status]: Verify that the IUT (Instance Under Test) is running (post-chaos)")
+        if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
+            log.Errorf("Failed to get the VM status, err: %v", err)
+            failStep := "[post-chaos]: Failed to get the VM status, err: " + err.Error()
             result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
             return
         }
+        log.Info("[Verification]: VMs are in running state (post-chaos)")
     }
 
-    //POST-CHAOS VM STATUS CHECK
-    log.Info("[Status]: Verify that the IUT (Instance Under Test) is running (post-chaos)")
-    if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
-        log.Errorf("Failed to get the VM status, err: %v", err)
-        failStep := "[post-chaos]: Failed to get the VM status, err: " + err.Error()
-        result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
-        return
-    }
-    log.Info("[Verification]: VMs are in running state (post-chaos)")
-
     if experimentsDetails.EngineName != "" {
         // marking IUT as running, as we already checked the status of instance under test
         msg := "IUT: Running"
diff --git a/pkg/types/types.go b/pkg/types/types.go
index 4a633e1e0..1e9f62aec 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -72,26 +72,26 @@ type EventDetails struct {
 
 // ChaosDetails is for collecting all the global variables
 type ChaosDetails struct {
-    ChaosUID              clientTypes.UID
-    ChaosNamespace        string
-    ChaosPodName          string
-    EngineName            string
-    InstanceID            string
-    ExperimentName        string
-    Timeout               int
-    Delay                 int
-    AppDetail             AppDetails
-    ChaosDuration         int
-    JobCleanupPolicy      string
-    ProbeImagePullPolicy  string
-    Randomness            bool
-    Targets               []v1alpha1.TargetDetails
-    ParentsResources      []string
-    DefaultAppHealthCheck bool
-    Annotations           map[string]string
-    Resources             corev1.ResourceRequirements
-    ImagePullSecrets      []corev1.LocalObjectReference
-    Labels                map[string]string
+    ChaosUID             clientTypes.UID
+    ChaosNamespace       string
+    ChaosPodName         string
+    EngineName           string
+    InstanceID           string
+    ExperimentName       string
+    Timeout              int
+    Delay                int
+    AppDetail            AppDetails
+    ChaosDuration        int
+    JobCleanupPolicy     string
+    ProbeImagePullPolicy string
+    Randomness           bool
+    Targets              []v1alpha1.TargetDetails
+    ParentsResources     []string
+    DefaultHealthCheck   bool
+    Annotations          map[string]string
+    Resources            corev1.ResourceRequirements
+    ImagePullSecrets     []corev1.LocalObjectReference
+    Labels               map[string]string
 }
 
 // AppDetails contains all the application related envs
@@ -125,7 +125,7 @@ func InitialiseChaosVariables(chaosDetails *ChaosDetails) {
     chaosDetails.Timeout, _ = strconv.Atoi(Getenv("STATUS_CHECK_TIMEOUT", "180"))
     chaosDetails.Delay, _ = strconv.Atoi(Getenv("STATUS_CHECK_DELAY", "2"))
     chaosDetails.AppDetail = appDetails
-    chaosDetails.DefaultAppHealthCheck, _ = strconv.ParseBool(Getenv("DEFAULT_APP_HEALTH_CHECK", "true"))
+    chaosDetails.DefaultHealthCheck, _ = strconv.ParseBool(Getenv("DEFAULT_HEALTH_CHECK", "true"))
     chaosDetails.JobCleanupPolicy = Getenv("JOB_CLEANUP_POLICY", "retain")
     chaosDetails.ProbeImagePullPolicy = Getenv("LIB_IMAGE_PULL_POLICY", "Always")
     chaosDetails.ParentsResources = []string{}
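The types.go hunk is the heart of the patch: a single DEFAULT_HEALTH_CHECK env (default "true") now gates the AUT, auxiliary-application, and cloud-resource checks in every experiment above. A minimal sketch of how that gate behaves follows; only the env name, its "true" default, and the discarded ParseBool error mirror the hunk, while the Getenv helper body and the log strings are illustrative.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Getenv returns the value of key, or fallback when the variable is unset or
// empty; this mirrors how the pkg/types helper is used in the hunk above.
func Getenv(key, fallback string) string {
	value := os.Getenv(key)
	if value == "" {
		value = fallback
	}
	return value
}

func main() {
	// The parse error is discarded, so an unparsable value leaves the zero
	// value (false), i.e. the checks are skipped rather than failing the run.
	defaultHealthCheck, _ := strconv.ParseBool(Getenv("DEFAULT_HEALTH_CHECK", "true"))

	if defaultHealthCheck {
		fmt.Println("[Status]: running default pre/post-chaos health checks")
	} else {
		// e.g. DEFAULT_HEALTH_CHECK=false skips AUT and cloud-resource checks
		fmt.Println("[Status]: default health checks skipped")
	}
}

One consequence worth noting when reviewing: because ParseBool's error is ignored, a value such as DEFAULT_HEALTH_CHECK=yes silently disables the checks instead of being rejected.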