Skip to content

Commit

Permalink
Merge pull request #693 from imjaroiswebdev/error-del-schedule-used-b…
Browse files Browse the repository at this point in the history
…y-ep-w-1-layer

Address Schedule can't be deleted when used by EP with one layer configured
  • Loading branch information
imjaroiswebdev authored May 29, 2023
2 parents 4854ba9 + 32e6944 commit cd84d92
Show file tree
Hide file tree
Showing 540 changed files with 71,240 additions and 17,517 deletions.
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -182,11 +182,13 @@ PAGERDUTY_ACC_INCIDENT_WORKFLOWS=1 make testacc TESTARGS="-run PagerDutyIncident
PAGERDUTY_ACC_SERVICE_INTEGRATION_GENERIC_EMAIL_NO_FILTERS="user@<your_domain>.pagerduty.com" make testacc TESTARGS="-run PagerDutyServiceIntegration_GenericEmailNoFilters"
PAGERDUTY_ACC_CUSTOM_FIELDS=1 make testacc TESTARGS="-run PagerDutyCustomField"
PAGERDUTY_ACC_LICENSE_NAME="Full User" make testacc TESTARGS="-run DataSourcePagerDutyLicense_Basic"
PAGERDUTY_ACC_SCHEDULE_USED_BY_EP_W_1_LAYER=1 make testacc TESTARGS="-run PagerDutyScheduleWithTeams_EscalationPolicyDependantWithOneLayer"
```

| Variable Name | Feature Set |
| ------------------------------------------------------------ | ------------------- |
|--------------------------------------------------------------|---------------------|
| `PAGERDUTY_ACC_INCIDENT_WORKFLOWS` | Incident Workflows |
| `PAGERDUTY_ACC_SERVICE_INTEGRATION_GENERIC_EMAIL_NO_FILTERS` | Service Integration |
| `PAGERDUTY_ACC_CUSTOM_FIELDS` | Custom Fields |
| `PAGERDUTY_ACC_LICENSE_NAME` | Licenses |
| `PAGERDUTY_ACC_SCHEDULE_USED_BY_EP_W_1_LAYER` | Schedule |
25 changes: 13 additions & 12 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,27 @@ go 1.17

require (
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/hc-install v0.5.2
github.com/hashicorp/terraform-exec v0.15.0
github.com/hashicorp/terraform-json v0.13.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.10.1
github.com/heimweh/go-pagerduty v0.0.0-20230421012559-75399decbf4a
)

require (
cloud.google.com/go v0.71.0 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
github.com/agext/levenshtein v1.2.2 // indirect
github.com/apparentlymart/go-cidr v1.0.1 // indirect
github.com/apparentlymart/go-textseg v1.0.0 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.7.0 // indirect
github.com/golang/protobuf v1.4.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-checkpoint v0.5.0 // indirect
Expand All @@ -27,12 +33,8 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.4.1 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/go-version v1.3.0 // indirect
github.com/hashicorp/hc-install v0.3.1 // indirect
github.com/hashicorp/hcl/v2 v2.3.0 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/terraform-exec v0.15.0 // indirect
github.com/hashicorp/terraform-json v0.13.0 // indirect
github.com/hashicorp/terraform-plugin-go v0.5.0 // indirect
github.com/hashicorp/terraform-plugin-log v0.2.0 // indirect
github.com/hashicorp/terraform-registry-address v0.0.0-20210412075316-9b2996cce896 // indirect
Expand All @@ -56,13 +58,12 @@ require (
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
github.com/zclconf/go-cty v1.9.1 // indirect
go.mongodb.org/mongo-driver v1.10.2 // indirect
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.9.0 // indirect
google.golang.org/api v0.35.0 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb // indirect
Expand Down
127 changes: 103 additions & 24 deletions go.sum

Large diffs are not rendered by default.

167 changes: 129 additions & 38 deletions pagerduty/resource_pagerduty_schedule.go
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,7 @@ func resourcePagerDutyScheduleDelete(d *schema.ResourceData, meta interface{}) e

log.Printf("[INFO] Listing Escalation Policies that use schedule : %s", scheduleId)
// Extracting Escalation Policies that use this Schedule
epsAssociatedToSchedule, err := extractEPsAssociatedToSchedule(client, scheduleData)
epsUsingThisSchedule, err := extractEPsUsingASchedule(client, scheduleData)
if err != nil {
return err
}
Expand All @@ -425,30 +425,41 @@ func resourcePagerDutyScheduleDelete(d *schema.ResourceData, meta interface{}) e
// Handling of specific http 400 errors from API call DELETE /schedules
e, ok := err.(*pagerduty.Error)
if !ok || !isErrorScheduleUsedByEP(e) && !isErrorScheduleWOpenIncidents(e) {
log.Printf("[MYDEBUG] isErrorScheduleUsedByEP: %t; isErrorScheduleWOpenIncidents: %t", isErrorScheduleUsedByEP(e), isErrorScheduleWOpenIncidents(e))
return resource.NonRetryableError(err)
}

var workaroundErr error
// An Schedule with open incidents related can't be remove till those
// incidents have been resolved.
linksToIncidentsOpen, workaroundErr := listIncidentsOpenedRelatedToSchedule(client, scheduleData, epsAssociatedToSchedule)
linksToIncidentsOpen, workaroundErr := listIncidentsOpenedRelatedToSchedule(client, scheduleData, epsUsingThisSchedule)
if workaroundErr != nil {
err = fmt.Errorf("%v; %w", err, workaroundErr)
return resource.NonRetryableError(err)
}

if len(linksToIncidentsOpen) > 0 {
hasToShowIncidentRemediationMessage := len(linksToIncidentsOpen) > 0
if hasToShowIncidentRemediationMessage {
var urlLinksMessage string
for _, incident := range linksToIncidentsOpen {
urlLinksMessage = fmt.Sprintf("%s\n%s", urlLinksMessage, incident)
}
return resource.NonRetryableError(fmt.Errorf("Before Removing Schedule %q You must first resolve or reassign the following incidents related with Escalation Policies using this Schedule... %s", scheduleId, urlLinksMessage))
return resource.NonRetryableError(fmt.Errorf("Before destroying Schedule %q You must first resolve or reassign the following incidents related with Escalation Policies using this Schedule... %s", scheduleId, urlLinksMessage))
}

epsDataUsingThisSchedule, errFetchingFullEPs := fetchEPsDataUsingASchedule(epsUsingThisSchedule, client)
if errFetchingFullEPs != nil {
err = fmt.Errorf("%v; %w", err, errFetchingFullEPs)
return resource.RetryableError(err)
}

errBlockingBecauseOfEPs := detectUseOfScheduleByEPsWithOneLayer(scheduleId, epsDataUsingThisSchedule)
if errBlockingBecauseOfEPs != nil {
return resource.NonRetryableError(errBlockingBecauseOfEPs)
}

// Workaround for Schedule being used by escalation policies error
log.Printf("[INFO] Dissociating Escalation Policies that use the Schedule: %s", scheduleId)
workaroundErr = dissociateScheduleFromEPs(client, scheduleId, epsAssociatedToSchedule)
workaroundErr = dissociateScheduleFromEPs(client, scheduleId, epsDataUsingThisSchedule)
if workaroundErr != nil {
err = fmt.Errorf("%v; %w", err, workaroundErr)
}
Expand Down Expand Up @@ -673,67 +684,56 @@ func listIncidentsOpenedRelatedToSchedule(c *pagerduty.Client, schedule *pagerdu
}
return linksToIncidents, nil
}
func extractEPsAssociatedToSchedule(c *pagerduty.Client, schedule *pagerduty.Schedule) ([]string, error) {
// extractEPsUsingASchedule returns the IDs of every Escalation Policy that
// the given Schedule payload reports as referencing it.
//
// The *pagerduty.Client parameter is currently unused and the returned error
// is always nil; both are kept so the signature stays consistent with the
// other EP helpers in this file and callers need no changes.
func extractEPsUsingASchedule(c *pagerduty.Client, schedule *pagerduty.Schedule) ([]string, error) {
	// Pre-size: exactly one ID per associated Escalation Policy.
	eps := make([]string, 0, len(schedule.EscalationPolicies))
	for _, ep := range schedule.EscalationPolicies {
		eps = append(eps, ep.ID)
	}
	return eps, nil
}

func dissociateScheduleFromEPs(c *pagerduty.Client, scheduleID string, eps []string) error {
for _, epID := range eps {
isEPFound := false
var ep *pagerduty.EscalationPolicy
errorMessage := fmt.Sprintf("Error while trying to dissociate Schedule %q from Escalation Policy %q", scheduleID, epID)
retryErr := resource.Retry(10*time.Second, func() *resource.RetryError {
resp, _, err := c.EscalationPolicies.Get(epID, &pagerduty.GetEscalationPolicyOptions{})
if err != nil {
if isErrCode(err, 404) {
return nil
}
return resource.RetryableError(err)
}
ep = resp
isEPFound = true
return nil
})
if retryErr != nil {
return fmt.Errorf("%w; %s", retryErr, errorMessage)
}

if !isEPFound {
continue
}
// dissociateScheduleFromEPs removes the Schedule identified by scheduleID
// from each Escalation Policy in eps, stopping at the first failure.
func dissociateScheduleFromEPs(c *pagerduty.Client, scheduleID string, eps []*pagerduty.EscalationPolicy) error {
	for _, ep := range eps {
		if err := removeScheduleFromEP(c, scheduleID, ep); err != nil {
			// Same wrapped message as before: "<cause>; Error while trying to…"
			return fmt.Errorf("%w; Error while trying to dissociate Schedule %q from Escalation Policy %q", err, scheduleID, ep.ID)
		}
	}
	return nil
}

func removeScheduleFromEP(c *pagerduty.Client, scheduleID string, ep *pagerduty.EscalationPolicy) error {
needsToUpdate := false
epr := ep.EscalationRules
// If the Escalation Policy using this Schedule has only one layer then this
// workaround isn't applicable.
if len(epr) < 2 {
return nil
}

for ri, r := range epr {
for index, target := range r.Targets {
isScheduleConfiguredInEscalationRule := target.Type == "schedule_reference" && target.ID == scheduleID
if !isScheduleConfiguredInEscalationRule {
continue
}

if isScheduleConfiguredInEscalationRule {
if len(r.Targets) > 1 {
// Removing Schedule as a configured Target from the Escalation Rules
// slice.
r.Targets = append(r.Targets[:index], r.Targets[index+1:]...)
if len(r.Targets) > 1 {
// Removing Schedule as a configured Target from the Escalation Rules
// slice.
r.Targets = append(r.Targets[:index], r.Targets[index+1:]...)
} else {
// Removing Escalation Rules that will end up having no target configured.
isLastRule := ri == len(epr)-1
if isLastRule {
epr = epr[:ri]
} else {
// Removing Escalation Rules that will end up having no target configured.
epr = append(epr[:ri], epr[ri+1:]...)
}
needsToUpdate = true
}
needsToUpdate = true
}
}
if !needsToUpdate {
Expand All @@ -754,3 +754,94 @@ func removeScheduleFromEP(c *pagerduty.Client, scheduleID string, ep *pagerduty.

return nil
}

// detectUseOfScheduleByEPsWithOneLayer blocks the destruction of a Schedule
// that is effectively the sole target of one or more Escalation Policies,
// because dissociating the Schedule would leave those policies without any
// layer configured.
//
// It returns nil when no such Escalation Policy exists, the error from
// reading the Terraform state snapshot, or a descriptive error telling the
// operator which Escalation Policies must be handled first.
func detectUseOfScheduleByEPsWithOneLayer(scheduleId string, eps []*pagerduty.EscalationPolicy) error {
	epsFound := []*pagerduty.EscalationPolicy{}
	for _, ep := range eps {
		// An EP without layers can't be emptied by this dissociation.
		epHasNoLayers := len(ep.EscalationRules) == 0
		if epHasNoLayers {
			continue
		}

		// Case 1: a single layer with a single target.
		epHasOneLayer := len(ep.EscalationRules) == 1 && len(ep.EscalationRules[0].Targets) == 1

		// Case 2: multiple layers, but every layer's only target is this
		// Schedule, so removing it would empty the whole policy.
		epHasMultipleLayersButAllTargetThisSchedule := func() bool {
			if len(ep.EscalationRules) == 1 {
				return false
			}
			for _, rule := range ep.EscalationRules {
				// Bug fix: check the length BEFORE indexing Targets[0] —
				// the previous code indexed first and panicked on an
				// escalation rule with zero targets.
				if len(rule.Targets) != 1 {
					return false
				}
				target := rule.Targets[0]
				if target.Type != "schedule_reference" || target.ID != scheduleId {
					return false
				}
			}
			return true
		}

		if !epHasOneLayer && !epHasMultipleLayersButAllTargetThisSchedule() {
			continue
		}
		epsFound = append(epsFound, ep)
	}

	if len(epsFound) == 0 {
		return nil
	}

	tfState, err := getTFStateSnapshot()
	if err != nil {
		return err
	}

	epsNames := []string{}
	for _, ep := range epsFound {
		epState := tfState.GetResourceStateById(ep.ID)

		// To cover the case when the Schedule is used by an Escalation Policy which
		// is not being managed by the same TF config which is managing this Schedule.
		if epState == nil {
			return fmt.Errorf("It is not possible to continue with the destruction of the Schedule %q, because it is being used by Escalation Policy %q which has only one layer configured. Nevertheless, the mentioned Escalation Policy is not managed by this Terraform configuration. So in order to unblock this resource destruction, We suggest you to first make the appropiate changes on the Escalation Policy %s and come back for retrying.", scheduleId, ep.ID, ep.HTMLURL)
		}
		epsNames = append(epsNames, epState.Name)
	}

	displayError := fmt.Errorf(`It is not possible to continue with the destruction of the Schedule %q, because it is being used by the Escalation Policy %[2]q which has only one layer configured. Therefore in order to unblock this resource destruction, We suggest you to first execute "terraform apply (or destroy, please act accordingly) -target=pagerduty_escalation_policy.%[2]s"`, scheduleId, epsNames[0])
	if len(epsNames) > 1 {
		var epsListMessage string
		for _, ep := range epsNames {
			epsListMessage = fmt.Sprintf("%s\n%s", epsListMessage, ep)
		}
		displayError = fmt.Errorf(`It is not possible to continue with the destruction of the Schedule %q, because it is being used by multiple Escalation Policies which have only one layer configured. Therefore in order to unblock this resource destruction, We suggest you to first execute "terraform apply (or destroy, please act accordingly) -target=pagerduty_escalation_policy.<Escalation Policy Name here>". e.g: "terraform apply -target=pagerduty_escalation_policy.example". Replacing the example name with the following Escalation Policies which are blocking the deletion of the Schedule...%s`, scheduleId, epsListMessage)
	}

	return displayError
}

// fetchEPsDataUsingASchedule resolves each Escalation Policy ID in eps into
// its full API representation, retrying each read for up to ten seconds.
func fetchEPsDataUsingASchedule(eps []string, c *pagerduty.Client) ([]*pagerduty.EscalationPolicy, error) {
	fullEPs := make([]*pagerduty.EscalationPolicy, 0, len(eps))
	for _, epID := range eps {
		fetchErr := resource.Retry(10*time.Second, func() *resource.RetryError {
			// NOTE(review): every error is retried here, including 404s, so a
			// policy deleted mid-flight stalls the full 10s — confirm intended.
			ep, _, getErr := c.EscalationPolicies.Get(epID, &pagerduty.GetEscalationPolicyOptions{})
			if getErr != nil {
				return resource.RetryableError(getErr)
			}
			fullEPs = append(fullEPs, ep)
			return nil
		})
		if fetchErr != nil {
			// Return what was gathered so far alongside the failure.
			return fullEPs, fetchErr
		}
	}

	return fullEPs, nil
}
Loading

0 comments on commit cd84d92

Please sign in to comment.