Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

batch job definition remove force new and increment revisions #35149

Merged
3 changes: 3 additions & 0 deletions .changelog/35149.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/aws_batch_job_definition: Add update functions instead of ForceNew. Add `deregister_on_new_revision` to allow keeping prior versions ACTIVE when a new revision is published.
```
54 changes: 54 additions & 0 deletions internal/service/batch/findv2.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package batch

import (
"context"

"github.com/aws/aws-sdk-go-v2/service/batch"
"github.com/aws/aws-sdk-go-v2/service/batch/types"
"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

// FindJobDefinitionV2ByARN looks up a single Batch job definition by its ARN
// using the AWS SDK for Go v2 client. It returns an empty-result error when
// nothing matches and a too-many-results error when the ARN resolves to more
// than one definition.
func FindJobDefinitionV2ByARN(ctx context.Context, conn *batch.Client, arn string) (*types.JobDefinition, error) {
	input := &batch.DescribeJobDefinitionsInput{
		JobDefinitions: []string{arn},
	}

	output, err := conn.DescribeJobDefinitions(ctx, input)
	if err != nil {
		return nil, err
	}

	var n int
	if output != nil {
		n = len(output.JobDefinitions)
	}

	switch {
	case n == 0:
		return nil, tfresource.NewEmptyResultError(input)
	case n > 1:
		return nil, tfresource.NewTooManyResultsError(n, input)
	}

	return &output.JobDefinitions[0], nil
}

// ListJobDefinitionsV2ByNameWithStatus returns every job definition matching
// the supplied DescribeJobDefinitions input, draining all result pages via the
// SDK v2 paginator. An empty-result error is returned when nothing matches.
func ListJobDefinitionsV2ByNameWithStatus(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) ([]types.JobDefinition, error) {
	var jobDefinitions []types.JobDefinition

	paginator := batch.NewDescribeJobDefinitionsPaginator(conn, input)
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return nil, err
		}

		jobDefinitions = append(jobDefinitions, page.JobDefinitions...)
	}

	if len(jobDefinitions) == 0 {
		return nil, tfresource.NewEmptyResultError(input)
	}

	return jobDefinitions, nil
}
145 changes: 124 additions & 21 deletions internal/service/batch/job_definition.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,6 @@ func ResourceJobDefinition() *schema.Resource {
"container_properties": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"eks_properties", "node_properties"},
StateFunc: func(v interface{}) string {
json, _ := structure.NormalizeJsonString(v)
Expand All @@ -71,6 +70,12 @@ func ResourceJobDefinition() *schema.Resource {
ValidateFunc: validJobContainerProperties,
},

"deregister_on_new_revision": {
Type: schema.TypeBool,
Default: true,
Optional: true,
},

"name": {
Type: schema.TypeString,
Required: true,
Expand All @@ -81,7 +86,6 @@ func ResourceJobDefinition() *schema.Resource {
"node_properties": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"container_properties", "eks_properties"},
StateFunc: func(v interface{}) string {
json, _ := structure.NormalizeJsonString(v)
Expand Down Expand Up @@ -323,14 +327,12 @@ func ResourceJobDefinition() *schema.Resource {
"parameters": {
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
},

"platform_capabilities": {
Type: schema.TypeSet,
Optional: true,
ForceNew: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringInSlice(batch.PlatformCapability_Values(), false),
Expand All @@ -340,21 +342,18 @@ func ResourceJobDefinition() *schema.Resource {
"propagate_tags": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: false,
},

"retry_strategy": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attempts": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntBetween(1, 10),
},
"evaluate_on_exit": {
Expand All @@ -368,7 +367,6 @@ func ResourceJobDefinition() *schema.Resource {
"action": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: func(v interface{}) string {
return strings.ToLower(v.(string))
},
Expand All @@ -377,7 +375,6 @@ func ResourceJobDefinition() *schema.Resource {
"on_exit_code": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9]*\*?$`), "must contain only numbers, and can optionally end with an asterisk"),
Expand All @@ -386,7 +383,6 @@ func ResourceJobDefinition() *schema.Resource {
"on_reason": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z.:\s]*\*?$`), "must contain letters, numbers, periods, colons, and white space, and can optionally end with an asterisk"),
Expand All @@ -395,7 +391,6 @@ func ResourceJobDefinition() *schema.Resource {
"on_status_reason": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z.:\s]*\*?$`), "must contain letters, numbers, periods, colons, and white space, and can optionally end with an asterisk"),
Expand All @@ -416,7 +411,6 @@ func ResourceJobDefinition() *schema.Resource {
"scheduling_priority": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},

names.AttrTags: tftags.TagsSchema(),
Expand All @@ -425,14 +419,12 @@ func ResourceJobDefinition() *schema.Resource {
"timeout": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attempt_duration_seconds": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(60),
},
},
Expand All @@ -442,7 +434,6 @@ func ResourceJobDefinition() *schema.Resource {
"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{batch.JobDefinitionTypeContainer, batch.JobDefinitionTypeMultinode}, true),
},
},
Expand Down Expand Up @@ -624,8 +615,95 @@ func resourceJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta

// resourceJobDefinitionUpdate handles updates to a Batch job definition.
// Job definitions are immutable in AWS Batch, so any non-tag change is
// implemented as RegisterJobDefinition (which publishes revision N+1) followed
// by an optional deregistration of the previous revision, controlled by the
// deregister_on_new_revision attribute.
func resourceJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	var diags diag.Diagnostics
	conn := meta.(*conns.AWSClient).BatchConn(ctx)

	// Tag-only changes are handled by the transparent tagging interceptor;
	// everything else requires registering a new revision.
	if d.HasChangesExcept("tags", "tags_all") {
		name := d.Get("name").(string)
		input := &batch.RegisterJobDefinitionInput{
			JobDefinitionName: aws.String(name),
			Type:              aws.String(d.Get("type").(string)),
		}

		if v, ok := d.GetOk("container_properties"); ok {
			props, err := expandJobContainerProperties(v.(string))
			if err != nil {
				return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
			}

			// Container properties only apply to the "container" job type.
			if aws.StringValue(input.Type) == batch.JobDefinitionTypeContainer {
				removeEmptyEnvironmentVariables(&diags, props.Environment, cty.GetAttrPath("container_properties"))
				input.ContainerProperties = props
			}
		}

		if v, ok := d.GetOk("eks_properties"); ok {
			eksProps := v.([]interface{})[0].(map[string]interface{})
			if podProps, ok := eksProps["pod_properties"].([]interface{}); ok && len(podProps) > 0 {
				props := expandEKSPodProperties(podProps[0].(map[string]interface{}))
				input.EksProperties = &batch.EksProperties{
					PodProperties: props,
				}
			}
		}

		if v, ok := d.GetOk("node_properties"); ok {
			props, err := expandJobNodeProperties(v.(string))
			if err != nil {
				return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
			}

			for _, node := range props.NodeRangeProperties {
				removeEmptyEnvironmentVariables(&diags, node.Container.Environment, cty.GetAttrPath("node_properties"))
			}
			input.NodeProperties = props
		}

		if v, ok := d.GetOk("propagate_tags"); ok {
			input.PropagateTags = aws.Bool(v.(bool))
		}

		if v, ok := d.GetOk("parameters"); ok {
			input.Parameters = expandJobDefinitionParameters(v.(map[string]interface{}))
		}

		if v, ok := d.GetOk("platform_capabilities"); ok && v.(*schema.Set).Len() > 0 {
			input.PlatformCapabilities = flex.ExpandStringSet(v.(*schema.Set))
		}

		if v, ok := d.GetOk("scheduling_priority"); ok {
			input.SchedulingPriority = aws.Int64(int64(v.(int)))
		}

		if v, ok := d.GetOk("retry_strategy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
			input.RetryStrategy = expandRetryStrategy(v.([]interface{})[0].(map[string]interface{}))
		}

		if v, ok := d.GetOk("timeout"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
			input.Timeout = expandJobTimeout(v.([]interface{})[0].(map[string]interface{}))
		}

		jd, err := conn.RegisterJobDefinitionWithContext(ctx, input)
		if err != nil {
			return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
		}

		// The ARN embeds the revision number and is what the Read call resolves,
		// so capture the outgoing revision's ARN before repointing the resource
		// ID at the newly registered revision.
		currentARN := d.Get("arn").(string)
		d.SetId(aws.StringValue(jd.JobDefinitionArn))
		d.Set("revision", jd.Revision)

		if d.Get("deregister_on_new_revision").(bool) {
			log.Printf("[DEBUG] Deleting Previous Batch Job Definition: %s", currentARN)
			_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
				JobDefinition: aws.String(currentARN),
			})

			if err != nil {
				return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", currentARN, err)
			}
		}
	}

	return append(diags, resourceJobDefinitionRead(ctx, d, meta)...)
}
Expand All @@ -634,13 +712,23 @@ func resourceJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, me
var diags diag.Diagnostics
conn := meta.(*conns.AWSClient).BatchConn(ctx)

log.Printf("[DEBUG] Deleting Batch Job Definition: %s", d.Id())
_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
JobDefinition: aws.String(d.Id()),
})
name := d.Get("name").(string)
jds, err := ListActiveJobDefinitionByName(ctx, conn, name)

if err != nil {
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", d.Id(), err)
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definitions (%s): %s", name, err)
}

for i := range jds {
arn := aws.StringValue(jds[i].JobDefinitionArn)
log.Printf("[DEBUG] Deleting Batch Job Definition: %s", arn)
_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
JobDefinition: aws.String(arn),
})

if err != nil {
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", arn, err)
}
}

return diags
Expand Down Expand Up @@ -670,6 +758,21 @@ func FindJobDefinitionByARN(ctx context.Context, conn *batch.Batch, arn string)
return output, nil
}

// ListActiveJobDefinitionByName returns every ACTIVE revision of the named
// Batch job definition. DescribeJobDefinitions is a paginated API; all pages
// are drained so callers (e.g. resource deletion, which deregisters each
// returned revision) see every active revision rather than only the first
// page of results.
func ListActiveJobDefinitionByName(ctx context.Context, conn *batch.Batch, name string) ([]*batch.JobDefinition, error) {
	input := &batch.DescribeJobDefinitionsInput{
		JobDefinitionName: aws.String(name),
		Status:            aws.String(jobDefinitionStatusActive),
	}

	var jobDefinitions []*batch.JobDefinition

	err := conn.DescribeJobDefinitionsPagesWithContext(ctx, input, func(page *batch.DescribeJobDefinitionsOutput, lastPage bool) bool {
		jobDefinitions = append(jobDefinitions, page.JobDefinitions...)

		return !lastPage
	})

	if err != nil {
		return nil, err
	}

	return jobDefinitions, nil
}

func findJobDefinition(ctx context.Context, conn *batch.Batch, input *batch.DescribeJobDefinitionsInput) (*batch.JobDefinition, error) {
output, err := conn.DescribeJobDefinitionsWithContext(ctx, input)

Expand Down
4 changes: 4 additions & 0 deletions internal/service/batch/job_definition_data_source_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"fmt"
"testing"

"github.com/YakDriver/regexache"
sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-provider-aws/internal/acctest"
Expand Down Expand Up @@ -65,6 +66,9 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.0.attempts", "10"),
resource.TestCheckResourceAttr(dataSourceName, "revision", "1"),
resource.TestCheckResourceAttr(dataSourceName, "revision", "1"),
resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"),
acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))),
),
},
{
Expand Down
Loading
Loading