From c15347ce5dae8801e31d80f6ba0a91a12e781f5e Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Tue, 19 Dec 2023 18:51:46 -0500 Subject: [PATCH 01/17] Batch datasource Co-authored-by: Drew Mullen --- go.mod | 1 + go.sum | 2 + internal/conns/awsclient_gen.go | 5 + internal/service/batch/enum.go | 16 + internal/service/batch/findv2.go | 54 + .../batch/job_definition_data_source.go | 1314 +++++++++++++++++ .../batch/job_definition_data_source_test.go | 257 ++++ internal/service/batch/service_package_gen.go | 20 +- names/data/names_data.csv | 4 +- names/names.go | 1 + .../docs/d/batch_job_definition.html.markdown | 274 ++++ 11 files changed, 1945 insertions(+), 3 deletions(-) create mode 100644 internal/service/batch/enum.go create mode 100644 internal/service/batch/findv2.go create mode 100644 internal/service/batch/job_definition_data_source.go create mode 100644 internal/service/batch/job_definition_data_source_test.go create mode 100644 website/docs/d/batch_job_definition.html.markdown diff --git a/go.mod b/go.mod index 6d0776f7ea5..91b69a2aad7 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/apprunner v1.27.0 github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 + github.com/aws/aws-sdk-go-v2/service/batch v1.30.5 github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 github.com/aws/aws-sdk-go-v2/service/budgets v1.21.0 diff --git a/go.sum b/go.sum index afc358022a6..90965f91d6f 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,8 @@ github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 h1:oVrFdlLcYETrVftzF0Q/Dr0tf github.com/aws/aws-sdk-go-v2/service/athena v1.39.0/go.mod h1:PPlSmhFoI4r5BGLB+6YDUHSU3E77brazZXLcj2DeQZQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 h1:wW06a5cOpVYJ1NrjmcKpk54xqUYK2PbL0ttOcXKyBrQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0/go.mod h1:KPniIDEIjjhk8v1hkQeBeUcSPS0i/MAGXt80hUr6Cbc= +github.com/aws/aws-sdk-go-v2/service/batch v1.30.5 h1:plf1gPkD4t7yFygClkfxYREpDnLu/tub6tJO6U31TKU= +github.com/aws/aws-sdk-go-v2/service/batch v1.30.5/go.mod h1:PueWUeJBztSAvgaTrbefYvj+kOhBbjE2nia473vk2L8= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 h1:Eah+mRIMPbq3KdgLpUT44nCJi7cECjy5U2fgFO0jiiQ= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0/go.mod h1:orxULvnjYi9X3Na7eGy27KD6uOE8vDvyJCNJejmU92E= github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 h1:pAaehMb08sPnGBvPnm0paurEj6EtjCEwxaw8WZN51LA= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index b57d8c54352..8f6bddaffbb 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -14,6 +14,7 @@ import ( apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" bedrockagent_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrockagent" budgets_sdkv2 "github.com/aws/aws-sdk-go-v2/service/budgets" @@ -347,6 +348,10 @@ func (c *AWSClient) BatchConn(ctx context.Context) *batch_sdkv1.Batch { return errs.Must(conn[*batch_sdkv1.Batch](ctx, c, names.Batch, make(map[string]any))) } +func (c *AWSClient) BatchClient(ctx context.Context) *batch_sdkv2.Client { + return 
errs.Must(client[*batch_sdkv2.Client](ctx, c, names.Batch, make(map[string]any)))
+}
+
 func (c *AWSClient) BedrockClient(ctx context.Context) *bedrock_sdkv2.Client {
 	return errs.Must(client[*bedrock_sdkv2.Client](ctx, c, names.Bedrock, make(map[string]any)))
 }
diff --git a/internal/service/batch/enum.go b/internal/service/batch/enum.go
new file mode 100644
index 00000000000..c75ab45ab20
--- /dev/null
+++ b/internal/service/batch/enum.go
@@ -0,0 +1,16 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package batch
+
+const (
+	JobDefinitionStatusInactive string = "INACTIVE"
+	JobDefinitionStatusActive   string = "ACTIVE"
+)
+
+func JobDefinitionStatus_Values() []string {
+	return []string{
+		JobDefinitionStatusInactive,
+		JobDefinitionStatusActive,
+	}
+}
diff --git a/internal/service/batch/findv2.go b/internal/service/batch/findv2.go
new file mode 100644
index 00000000000..812ef300cd6
--- /dev/null
+++ b/internal/service/batch/findv2.go
@@ -0,0 +1,54 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package batch
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/service/batch"
+	"github.com/aws/aws-sdk-go-v2/service/batch/types"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+)
+
+func FindJobDefinitionV2ByARN(ctx context.Context, conn *batch.Client, arn string) (*types.JobDefinition, error) {
+	input := &batch.DescribeJobDefinitionsInput{
+		JobDefinitions: []string{arn},
+	}
+
+	out, err := conn.DescribeJobDefinitions(ctx, input)
+	if err != nil {
+		return nil, err
+	}
+
+	if out == nil || len(out.JobDefinitions) == 0 {
+		return nil, tfresource.NewEmptyResultError(input)
+	}
+
+	if count := len(out.JobDefinitions); count > 1 {
+		return nil, tfresource.NewTooManyResultsError(count, input)
+	}
+
+	return &out.JobDefinitions[0], nil
+}
+
+func ListJobDefinitionsV2ByNameWithStatus(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) ([]types.JobDefinition, error) {
+	var out []types.JobDefinition
+
+	pages := batch.NewDescribeJobDefinitionsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, page.JobDefinitions...)
+	}
+
+	// A nil slice has length zero, so a single len check suffices.
+	if len(out) == 0 {
+		return nil, tfresource.NewEmptyResultError(input)
+	}
+
+	return out, nil
+}
diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go
new file mode 100644
index 00000000000..5a09bbd2c38
--- /dev/null
+++ b/internal/service/batch/job_definition_data_source.go
@@ -0,0 +1,1314 @@
+// Copyright (c) HashiCorp, Inc.
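The two finders in findv2.go above are the only Batch SDK v2 calls this patch introduces: a point lookup by ARN, and a paginated listing by name and status. Below is a minimal sketch of the intended call pattern for the listing finder, assuming an in-package caller with an authenticated *batch.Client; `latestJobDefinition` is a hypothetical helper name, not part of this patch.

```go
package batch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/batch"
	"github.com/aws/aws-sdk-go-v2/service/batch/types"
)

// latestJobDefinition sketches how a job definition name is resolved to its
// newest ACTIVE revision; the data source's Read method below applies the
// same selection logic.
func latestJobDefinition(ctx context.Context, conn *batch.Client, name string) (*types.JobDefinition, error) {
	input := &batch.DescribeJobDefinitionsInput{
		JobDefinitionName: aws.String(name),
		Status:            aws.String(JobDefinitionStatusActive),
	}

	// The finder drains the paginator, so every matching revision comes
	// back; it returns an empty-result error rather than a nil slice.
	jds, err := ListJobDefinitionsV2ByNameWithStatus(ctx, conn, input)
	if err != nil {
		return nil, err
	}

	latest := jds[0]
	for _, jd := range jds[1:] {
		if aws.ToInt32(jd.Revision) > aws.ToInt32(latest.Revision) {
			latest = jd
		}
	}

	return &latest, nil
}
```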
+// SPDX-License-Identifier: MPL-2.0
+
+package batch
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/service/batch"
+	batchtypes "github.com/aws/aws-sdk-go-v2/service/batch/types"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/hashicorp/terraform-plugin-framework-validators/datasourcevalidator"
+	"github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/schema/validator"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-provider-aws/internal/create"
+	"github.com/hashicorp/terraform-provider-aws/internal/framework"
+	"github.com/hashicorp/terraform-provider-aws/internal/framework/flex"
+	fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types"
+	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
+	"github.com/hashicorp/terraform-provider-aws/names"
+)
+
+func newDataSourceJobDefinition(context.Context) (datasource.DataSourceWithConfigure, error) {
+	return &dataSourceJobDefinition{}, nil
+}
+
+const (
+	DSNameJobDefinition = "Job Definition Data Source"
+)
+
+// ConfigValidators enforces that exactly one of "arn" or "name" is
+// configured. It is defined on the job definition data source (not the job
+// queue resource) and uses the data source flavor of the validators.
+func (d *dataSourceJobDefinition) ConfigValidators(_ context.Context) []datasource.ConfigValidator {
+	return []datasource.ConfigValidator{
+		datasourcevalidator.ExactlyOneOf(
+			path.MatchRoot("arn"),
+			path.MatchRoot("name"),
+		),
+	}
+}
+
+type dataSourceJobDefinition struct {
+	framework.DataSourceWithConfigure
+}
+
+func (d *dataSourceJobDefinition) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name
+	resp.TypeName = "aws_batch_job_definition"
+}
+
+func (d *dataSourceJobDefinition) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: map[string]schema.Attribute{
+			"arn": schema.StringAttribute{
+				Optional:   true,
+				CustomType: fwtypes.ARNType,
+			},
+			"container_orchestration_type": schema.StringAttribute{
+				Computed: true,
+			},
+			"id": framework.IDAttribute(),
+			"name": schema.StringAttribute{
+				Optional: true,
+			},
+			names.AttrTags: tftags.TagsAttributeComputedOnly(),
+			"revision": schema.Int64Attribute{
+				Optional: true,
+			},
+			"status": schema.StringAttribute{
+				Optional: true,
+				// Defaults to JobDefinitionStatusActive in Read; the framework
+				// does not yet support defaults on data source attributes:
+				// https://github.com/hashicorp/terraform-plugin-framework/issues/751#issuecomment-1799757575
+				Validators: []validator.String{
+					stringvalidator.OneOf(JobDefinitionStatus_Values()...),
+				},
+			},
+			"scheduling_priority": schema.Int64Attribute{
+				Computed: true,
+			},
+			"type": schema.StringAttribute{
+				Computed: true,
+			},
+		},
+		Blocks: map[string]schema.Block{
+			"eks_properties": schema.SingleNestedBlock{
+				Blocks: map[string]schema.Block{
+					"pod_properties": schema.SingleNestedBlock{
+						Attributes: map[string]schema.Attribute{
+							"dns_policy": schema.StringAttribute{
+								Computed: true,
+							},
+							"host_network": schema.BoolAttribute{
+								Computed: true,
+							},
+							"service_account_name": schema.StringAttribute{
+								Computed: true,
+							},
+						},
+						Blocks: map[string]schema.Block{
+
"containers": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "args": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "commands": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "image": schema.StringAttribute{ + Computed: true, + }, + "image_pull_policy": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "env": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "resources": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "limits": schema.MapAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "requests": schema.MapAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + "security_context": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "privileged": schema.BoolAttribute{ + Computed: true, + }, + "run_as_user": schema.Int64Attribute{ + Computed: true, + }, + "run_as_group": schema.Int64Attribute{ + Computed: true, + }, + "run_as_non_root": schema.BoolAttribute{ + Computed: true, + }, + "read_only_root_filesystem": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + "volume_mounts": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "mount_path": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "read_only": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "metadata": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "labels": schema.MapAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + "volumes": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "empty_dir": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "medium": schema.StringAttribute{ + Computed: true, + }, + "size_limit": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "host_path": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "path": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "secret": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "secret_name": schema.StringAttribute{ + Computed: true, + }, + "optional": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "node_properties": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "main_node": schema.Int64Attribute{ + Computed: true, + }, + "num_nodes": schema.Int64Attribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "node_range_properties": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "target_nodes": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "container": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "command": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "execution_role_arn": schema.StringAttribute{ + 
Computed: true, + }, + "image": schema.StringAttribute{ + Computed: true, + }, + "instance_type": schema.StringAttribute{ + Computed: true, + }, + "job_role_arn": schema.StringAttribute{ + Computed: true, + }, + "privileged": schema.BoolAttribute{ + Computed: true, + }, + "readonly_root_filesystem": schema.BoolAttribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "environment": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "ephemeral_storage": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "size_in_gib": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "fargate_platform_configuration": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "platform_version": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "linux_parameters": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "init_process_enabled": schema.BoolAttribute{ + Computed: true, + }, + "max_swap": schema.Int64Attribute{ + Computed: true, + }, + "shared_memory_size": schema.Int64Attribute{ + Computed: true, + }, + "swappiness": schema.Int64Attribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "devices": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "host_path": schema.StringAttribute{ + Computed: true, + }, + "container_path": schema.StringAttribute{ + Computed: true, + }, + "permissions": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + "tmpfs": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "container_path": schema.StringAttribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + "mount_options": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + }, + }, + "log_configuration": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "options": schema.MapAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "log_driver": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "secret_options": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "value_from": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "mount_points": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "container_path": schema.StringAttribute{ + Computed: true, + }, + "read_only": schema.BoolAttribute{ + Computed: true, + }, + "source_volume": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "network_configuration": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "assign_public_ip": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "resource_requirements": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "type": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "runtime_platform": schema.SingleNestedBlock{ + 
Attributes: map[string]schema.Attribute{ + "cpu_architecture": schema.StringAttribute{ + Computed: true, + }, + "operating_system_family": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "secrets": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "value_from": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "ulimits": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "hard_limit": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "soft_limit": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "volumes": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "host": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "source_path": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "efs_volume_configuration": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "file_system_id": schema.StringAttribute{ + Computed: true, + }, + "root_directory": schema.StringAttribute{ + Computed: true, + }, + "transit_encryption": schema.StringAttribute{ + Computed: true, + }, + "transit_encryption_port": schema.Int64Attribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + "authorization_config": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "access_point_id": schema.StringAttribute{ + Computed: true, + }, + "iam": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "retry_strategy": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "attempts": schema.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.Between(1, 10), + }, + }, + }, + Blocks: map[string]schema.Block{ + "evaluate_on_exit": schema.ListNestedBlock{ + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "action": schema.StringAttribute{ + Computed: true, + }, + "on_exit_code": schema.StringAttribute{ + Computed: true, + }, + "on_reason": schema.StringAttribute{ + Computed: true, + }, + "on_status_reason": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "timeout": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "attempt_duration_seconds": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + } +} + +func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().BatchClient(ctx) + + var data dataSourceJobDefinitionData + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	jd := batchtypes.JobDefinition{}
+
+	if !data.ARN.IsNull() {
+		out, err := FindJobDefinitionV2ByARN(ctx, conn, aws.StringValue(flex.StringFromFramework(ctx, data.ARN)))
+		if err != nil {
+			resp.Diagnostics.AddError(
+				create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.ARN.String(), err),
+				err.Error(),
+			)
+			return
+		}
+		jd = *out
+	}
+
+	if !data.Name.IsNull() {
+		input := &batch.DescribeJobDefinitionsInput{
+			JobDefinitionName: flex.StringFromFramework(ctx, data.Name),
+		}
+
+		// Filter to ACTIVE revisions unless the configuration asks for a
+		// specific status.
+		if data.Status.IsNull() {
+			active := JobDefinitionStatusActive
+			input.Status = &active
+		} else {
+			input.Status = flex.StringFromFramework(ctx, data.Status)
+		}
+
+		jds, err := ListJobDefinitionsV2ByNameWithStatus(ctx, conn, input)
+		if err != nil {
+			resp.Diagnostics.AddError(
+				create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), err),
+				err.Error(),
+			)
+			return
+		}
+
+		if !data.Revision.IsNull() {
+			for _, _jd := range jds {
+				if aws.Int32Value(_jd.Revision) == int32(data.Revision.ValueInt64()) {
+					jd = _jd
+				}
+			}
+
+			if jd.JobDefinitionArn == nil {
+				resp.Diagnostics.AddError(
+					create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), fmt.Errorf("job definition revision %d not found", data.Revision.ValueInt64())),
+					fmt.Sprintf("job definition revision %d not found with name %s", data.Revision.ValueInt64(), data.Name.String()),
+				)
+				return
+			}
+		}
+
+		if data.Revision.IsNull() {
+			// No revision was configured, so select the latest one.
+			var latestRevision int32 = 0
+			for _, _jd := range jds {
+				if aws.Int32Value(_jd.Revision) > latestRevision {
+					latestRevision = aws.Int32Value(_jd.Revision)
+					jd = _jd
+				}
+			}
+		}
+	}
+
+	// These fields don't have the same names as their API counterparts.
+	data.ARN = flex.StringToFrameworkARN(ctx, jd.JobDefinitionArn)
+	data.ID = flex.StringToFramework(ctx, jd.JobDefinitionArn)
+	data.Name = flex.StringToFramework(ctx, jd.JobDefinitionName)
+	data.Revision = flex.Int32ToFramework(ctx, jd.Revision)
+	data.Status = flex.StringToFramework(ctx, jd.Status)
+	data.Type = flex.StringToFramework(ctx, jd.Type)
+	data.ContainerOrchestrationType = types.StringValue(string(jd.ContainerOrchestrationType))
+	data.SchedulingPriority = flex.Int32ToFramework(ctx, jd.SchedulingPriority)
+
+	if jd.Timeout != nil {
+		data.Timeout = types.ObjectValueMust(timeoutAttr, map[string]attr.Value{
+			"attempt_duration_seconds": flex.Int32ToFramework(ctx, jd.Timeout.AttemptDurationSeconds),
+		})
+	} else {
+		data.Timeout = types.ObjectNull(timeoutAttr)
+	}
+
+	resp.Diagnostics.Append(frameworkFlattenNodeProperties(ctx, jd.NodeProperties, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	resp.Diagnostics.Append(frameworkFlattenEKSproperties(ctx, jd.EksProperties, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	resp.Diagnostics.Append(frameworkFlattenRetryStrategy(ctx, jd.RetryStrategy, &data)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+} + +func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.EksProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { + if apiObject == nil { + data.EksProperties = types.ObjectNull(eksPropertiesAttr) + return + } + props := map[string]attr.Value{ + "dns_policy": flex.StringToFramework(ctx, apiObject.PodProperties.DnsPolicy), + "host_network": flex.BoolToFramework(ctx, apiObject.PodProperties.HostNetwork), + "service_account_name": flex.StringToFramework(ctx, apiObject.PodProperties.ServiceAccountName), + } + + if apiObject.PodProperties.Metadata != nil { + props["metadata"] = types.ObjectValueMust(eksMetadataAttr, map[string]attr.Value{ + "labels": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(apiObject.PodProperties.Metadata.Labels)), + }) + } else { + props["metadata"] = types.ObjectNull(eksMetadataAttr) + } + + if len(apiObject.PodProperties.Containers) > 0 { + props["containers"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerAttr}, frameworkFlattenEKSContainer(ctx, apiObject.PodProperties.Containers)) + } else { + props["containers"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerAttr}) + } + if len(apiObject.PodProperties.Volumes) > 0 { + props["volumes"] = types.ListValueMust(types.ObjectType{AttrTypes: eksVolumeAttr}, frameworkFlattenEKSVolume(ctx, apiObject.PodProperties.Volumes)) + } else { + props["volumes"] = types.ListNull(types.ObjectType{AttrTypes: eksVolumeAttr}) + } + data.EksProperties = types.ObjectValueMust(eksPropertiesAttr, map[string]attr.Value{ + "pod_properties": types.ObjectValueMust(eksPodPropertiesAttr, props), + }) + return +} + +func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.EksContainer) []attr.Value { + var containers []attr.Value + for _, c := range apiObject { + props := map[string]attr.Value{ + "image": flex.StringToFramework(ctx, c.Image), + "image_pull_policy": flex.StringToFramework(ctx, c.ImagePullPolicy), + "name": flex.StringToFramework(ctx, c.Name), + "args": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Args)), + "commands": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Command)), + } + if c.SecurityContext != nil { + props["security_context"] = types.ObjectValueMust(eksContainerSecurityContextAttr, map[string]attr.Value{ + "privileged": flex.BoolToFramework(ctx, c.SecurityContext.Privileged), + "run_as_user": flex.Int64ToFramework(ctx, c.SecurityContext.RunAsUser), + "run_as_group": flex.Int64ToFramework(ctx, c.SecurityContext.RunAsGroup), + "run_as_non_root": flex.BoolToFramework(ctx, c.SecurityContext.RunAsNonRoot), + "read_only_root_filesystem": flex.BoolToFramework(ctx, c.SecurityContext.ReadOnlyRootFilesystem), + }) + } else { + props["security_context"] = types.ObjectNull(eksContainerSecurityContextAttr) + } + if len(c.VolumeMounts) > 0 { + props["volume_mounts"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}, frameworkFlattenEKSContainerVolumeMount(ctx, c.VolumeMounts)) + } else { + props["volume_mounts"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}) + } + + if len(c.Env) > 0 { + props["env"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}, frameworkFlattenEKSContainerEnv(ctx, c.Env)) + } else { + props["env"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}) + } + + if c.Resources != nil { + props["resources"] = types.ObjectValueMust(eksContainerResourceRequirementsAttr, 
map[string]attr.Value{
+				"limits":   flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.Resources.Limits)),
+				"requests": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.Resources.Requests)),
+			})
+		} else {
+			props["resources"] = types.ObjectNull(eksContainerResourceRequirementsAttr)
+		}
+
+		containers = append(containers, types.ObjectValueMust(eksContainerAttr, props))
+	}
+	return containers
+}
+
+func frameworkFlattenNodeProperties(ctx context.Context, props *batchtypes.NodeProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) {
+	att := fwtypes.AttributeTypesMust[frameworkNodeProperties](ctx)
+	att["node_range_properties"] = types.ListType{ElemType: types.ObjectType{AttrTypes: nodeRangePropertiesAttr}}
+	if props == nil {
+		data.NodeProperties = types.ObjectNull(att)
+		return
+	}
+
+	var properties []attr.Value
+	for _, v := range props.NodeRangeProperties {
+		container, d := types.ObjectValue(containerPropertiesAttr, frameworkFlattenContainerProperties(ctx, v.Container))
+		diags = append(diags, d...)
+		if diags.HasError() {
+			return
+		}
+		properties = append(properties, types.ObjectValueMust(nodeRangePropertiesAttr, map[string]attr.Value{
+			"container":    container,
+			"target_nodes": flex.StringToFramework(ctx, v.TargetNodes),
+		}))
+	}
+	data.NodeProperties = types.ObjectValueMust(att, map[string]attr.Value{
+		"main_node":             flex.Int32ToFramework(ctx, props.MainNode),
+		"num_nodes":             flex.Int32ToFramework(ctx, props.NumNodes),
+		"node_range_properties": types.ListValueMust(types.ObjectType{AttrTypes: nodeRangePropertiesAttr}, properties),
+	})
+	return
+}
+
+func frameworkFlattenEKSVolume(ctx context.Context, apiObject []batchtypes.EksVolume) (volumes []attr.Value) {
+	for _, v := range apiObject {
+		volume := map[string]attr.Value{
+			"name": flex.StringToFramework(ctx, v.Name),
+		}
+		if v.EmptyDir != nil {
+			volume["empty_dir"] = types.ObjectValueMust(eksVolumeEmptyDirAttr, map[string]attr.Value{
+				"medium":     flex.StringToFramework(ctx, v.EmptyDir.Medium),
+				"size_limit": flex.StringToFramework(ctx, v.EmptyDir.SizeLimit),
+			})
+		} else {
+			volume["empty_dir"] = types.ObjectNull(eksVolumeEmptyDirAttr)
+		}
+		if v.HostPath != nil {
+			// The key must be "host_path" to match eksVolumeAttr.
+			volume["host_path"] = types.ObjectValueMust(eksVolumeHostPathAttr, map[string]attr.Value{
+				"path": flex.StringToFramework(ctx, v.HostPath.Path),
+			})
+		} else {
+			volume["host_path"] = types.ObjectNull(eksVolumeHostPathAttr)
+		}
+		if v.Secret != nil {
+			volume["secret"] = types.ObjectValueMust(eksVolumeSecretAttr, map[string]attr.Value{
+				"secret_name": flex.StringToFramework(ctx, v.Secret.SecretName),
+				"optional":    flex.BoolToFramework(ctx, v.Secret.Optional),
+			})
+		} else {
+			volume["secret"] = types.ObjectNull(eksVolumeSecretAttr)
+		}
+		volumes = append(volumes, types.ObjectValueMust(eksVolumeAttr, volume))
+	}
+	return
+}
+
+func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []batchtypes.EksContainerVolumeMount) (volumeMounts []attr.Value) {
+	for _, v := range apiObject {
+		volumeMounts = append(volumeMounts, types.ObjectValueMust(eksContainerVolumeMountAttr, map[string]attr.Value{
+			"mount_path": flex.StringToFramework(ctx, v.MountPath),
+			"name":       flex.StringToFramework(ctx, v.Name),
+			"read_only":  flex.BoolToFramework(ctx, v.ReadOnly),
+		}))
+	}
+	return
+}
+
+func frameworkFlattenEKSContainerEnv(ctx context.Context, apiObject []batchtypes.EksContainerEnvironmentVariable) (env []attr.Value) {
+	for _, v := range apiObject {
+		env = append(env, types.ObjectValueMust(eksContainerEnvironmentVariableAttr, map[string]attr.Value{
+			"name":  flex.StringToFramework(ctx, v.Name),
+			"value": flex.StringToFramework(ctx, v.Value),
+		}))
+	}
+	return
+}
+
+func frameworkFlattenContainerProperties(ctx context.Context, c *batchtypes.ContainerProperties) map[string]attr.Value {
+	containerProps := map[string]attr.Value{
+		"command":                  flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Command)),
+		"execution_role_arn":       flex.StringToFramework(ctx, c.ExecutionRoleArn),
+		"image":                    flex.StringToFramework(ctx, c.Image),
+		"instance_type":            flex.StringToFramework(ctx, c.InstanceType),
+		"job_role_arn":             flex.StringToFramework(ctx, c.JobRoleArn),
+		"privileged":               flex.BoolToFramework(ctx, c.Privileged),
+		"readonly_root_filesystem": flex.BoolToFramework(ctx, c.ReadonlyRootFilesystem),
+		"user":                     flex.StringToFramework(ctx, c.User),
+	}
+
+	if (c.EphemeralStorage != nil) && (c.EphemeralStorage.SizeInGiB != nil) {
+		containerProps["ephemeral_storage"] = types.ObjectValueMust(ephemeralStorageAttr, map[string]attr.Value{
+			"size_in_gib": flex.Int32ToFramework(ctx, c.EphemeralStorage.SizeInGiB),
+		})
+	} else {
+		containerProps["ephemeral_storage"] = types.ObjectNull(ephemeralStorageAttr)
+	}
+
+	if c.LinuxParameters != nil {
+		containerProps["linux_parameters"] = types.ObjectValueMust(
+			linuxParametersAttr,
+			frameworkFlattenContainerLinuxParameters(ctx, c.LinuxParameters),
+		)
+	} else {
+		containerProps["linux_parameters"] = types.ObjectNull(linuxParametersAttr)
+	}
+
+	if c.FargatePlatformConfiguration != nil {
+		containerProps["fargate_platform_configuration"] = types.ObjectValueMust(fargatePlatformConfigurationAttr, map[string]attr.Value{
+			"platform_version": flex.StringToFramework(ctx, c.FargatePlatformConfiguration.PlatformVersion),
+		})
+	} else {
+		containerProps["fargate_platform_configuration"] = types.ObjectNull(fargatePlatformConfigurationAttr)
+	}
+
+	if c.NetworkConfiguration != nil {
+		containerProps["network_configuration"] = types.ObjectValueMust(networkConfigurationAttr, map[string]attr.Value{
+			"assign_public_ip": flex.StringToFramework(ctx, aws.String(string(c.NetworkConfiguration.AssignPublicIp))),
+		})
+	} else {
+		containerProps["network_configuration"] = types.ObjectNull(networkConfigurationAttr)
+	}
+
+	if c.RuntimePlatform != nil {
+		containerProps["runtime_platform"] = types.ObjectValueMust(runtimePlatformAttr, map[string]attr.Value{
+			"cpu_architecture":        flex.StringToFramework(ctx, c.RuntimePlatform.CpuArchitecture),
+			"operating_system_family": flex.StringToFramework(ctx, c.RuntimePlatform.OperatingSystemFamily),
+		})
+	} else {
+		containerProps["runtime_platform"] = types.ObjectNull(runtimePlatformAttr)
+	}
+
+	if len(c.Environment) > 0 {
+		var environment []attr.Value
+		for _, env := range c.Environment {
+			environment = append(environment, types.ObjectValueMust(keyValuePairAttr, map[string]attr.Value{
+				"name":  flex.StringToFramework(ctx, env.Name),
+				"value": flex.StringToFramework(ctx, env.Value),
+			}))
+		}
+		containerProps["environment"] = types.ListValueMust(types.ObjectType{AttrTypes: keyValuePairAttr}, environment)
+	} else {
+		containerProps["environment"] = types.ListNull(types.ObjectType{AttrTypes: keyValuePairAttr})
+	}
+
+	if len(c.MountPoints) > 0 {
+		var mountPoints []attr.Value
+		for _, m := range c.MountPoints {
+			mountPoints = append(mountPoints, types.ObjectValueMust(mountPointAttr, map[string]attr.Value{
+				"container_path": flex.StringToFramework(ctx, m.ContainerPath),
+				"read_only":      flex.BoolToFramework(ctx, m.ReadOnly),
+				"source_volume":  flex.StringToFramework(ctx, m.SourceVolume),
+			}))
+		}
+		containerProps["mount_points"] = types.ListValueMust(types.ObjectType{AttrTypes: mountPointAttr}, mountPoints)
+	} else {
+		containerProps["mount_points"] = types.ListNull(types.ObjectType{AttrTypes: mountPointAttr})
+	}
+
+	if c.LogConfiguration != nil {
+		secretOptions := types.ListNull(types.ObjectType{AttrTypes: secretAttr})
+		if len(c.LogConfiguration.SecretOptions) > 0 {
+			var logConfigurationSecrets []attr.Value
+			for _, sec := range c.LogConfiguration.SecretOptions {
+				logConfigurationSecrets = append(logConfigurationSecrets, types.ObjectValueMust(secretAttr, map[string]attr.Value{
+					"name":       flex.StringToFramework(ctx, sec.Name),
+					"value_from": flex.StringToFramework(ctx, sec.ValueFrom),
+				}))
+			}
+			secretOptions = types.ListValueMust(types.ObjectType{AttrTypes: secretAttr}, logConfigurationSecrets)
+		}
+		// The key must be "secret_options" to match logConfigurationAttr.
+		containerProps["log_configuration"] = types.ObjectValueMust(logConfigurationAttr, map[string]attr.Value{
+			"options":        flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.LogConfiguration.Options)),
+			"log_driver":     flex.StringToFramework(ctx, aws.String(string(c.LogConfiguration.LogDriver))),
+			"secret_options": secretOptions,
+		})
+	} else {
+		containerProps["log_configuration"] = types.ObjectNull(logConfigurationAttr)
+	}
+
+	if len(c.ResourceRequirements) > 0 {
+		var resourceRequirements []attr.Value
+		for _, res := range c.ResourceRequirements {
+			resourceRequirements = append(resourceRequirements, types.ObjectValueMust(resourceRequirementsAttr, map[string]attr.Value{
+				"type":  flex.StringToFramework(ctx, aws.String(string(res.Type))),
+				"value": flex.StringToFramework(ctx, res.Value),
+			}))
+		}
+		containerProps["resource_requirements"] = types.ListValueMust(types.ObjectType{AttrTypes: resourceRequirementsAttr}, resourceRequirements)
+	} else {
+		containerProps["resource_requirements"] = types.ListNull(types.ObjectType{AttrTypes: resourceRequirementsAttr})
+	}
+
+	if len(c.Secrets) > 0 {
+		var secrets []attr.Value
+		for _, sec := range c.Secrets {
+			secrets = append(secrets, types.ObjectValueMust(secretAttr, map[string]attr.Value{
+				"name":       flex.StringToFramework(ctx, sec.Name),
+				"value_from": flex.StringToFramework(ctx, sec.ValueFrom),
+			}))
+		}
+		containerProps["secrets"] = types.ListValueMust(types.ObjectType{AttrTypes: secretAttr}, secrets)
+	} else {
+		containerProps["secrets"] = types.ListNull(types.ObjectType{AttrTypes: secretAttr})
+	}
+
+	if len(c.Ulimits) > 0 {
+		var ulimits []attr.Value
+		for _, ul := range c.Ulimits {
+			ulimits = append(ulimits, types.ObjectValueMust(ulimitsAttr, map[string]attr.Value{
+				"hard_limit": flex.Int32ToFramework(ctx, ul.HardLimit),
+				"name":       flex.StringToFramework(ctx, ul.Name),
+				"soft_limit": flex.Int32ToFramework(ctx, ul.SoftLimit),
+			}))
+		}
+		containerProps["ulimits"] = types.ListValueMust(types.ObjectType{AttrTypes: ulimitsAttr}, ulimits)
+	} else {
+		containerProps["ulimits"] = types.ListNull(types.ObjectType{AttrTypes: ulimitsAttr})
+	}
+
+	if len(c.Volumes) > 0 {
+		var volumes []attr.Value
+		for _, vol := range c.Volumes {
+			// Every key in volumeAttr must be present, so the nil branches
+			// fill in null objects.
+			volume := map[string]attr.Value{
+				"name": flex.StringToFramework(ctx, vol.Name),
+			}
+			if vol.Host != nil {
+				volume["host"] = types.ObjectValueMust(hostAttr, map[string]attr.Value{
+					"source_path": flex.StringToFramework(ctx, vol.Host.SourcePath),
+				})
+			} else {
+				volume["host"] = types.ObjectNull(hostAttr)
+			}
+			if vol.EfsVolumeConfiguration != nil {
+				authorizationConfig := types.ObjectNull(authorizationConfigAttr)
+				if vol.EfsVolumeConfiguration.AuthorizationConfig != nil {
+					authorizationConfig = types.ObjectValueMust(authorizationConfigAttr, map[string]attr.Value{
+						"access_point_id": flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.AuthorizationConfig.AccessPointId),
+						"iam":             flex.StringToFramework(ctx, aws.String(string(vol.EfsVolumeConfiguration.AuthorizationConfig.Iam))),
+					})
+				}
+				volume["efs_volume_configuration"] = types.ObjectValueMust(efsVolumeConfigurationAttr, map[string]attr.Value{
+					"file_system_id":          flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.FileSystemId),
+					"root_directory":          flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.RootDirectory),
+					"transit_encryption":      flex.StringToFramework(ctx, aws.String(string(vol.EfsVolumeConfiguration.TransitEncryption))),
+					"transit_encryption_port": flex.Int32ToFramework(ctx, vol.EfsVolumeConfiguration.TransitEncryptionPort),
+					"authorization_config":    authorizationConfig,
+				})
+			} else {
+				volume["efs_volume_configuration"] = types.ObjectNull(efsVolumeConfigurationAttr)
+			}
+			volumes = append(volumes, types.ObjectValueMust(volumeAttr, volume))
+		}
+		containerProps["volumes"] = types.ListValueMust(types.ObjectType{AttrTypes: volumeAttr}, volumes)
+	} else {
+		containerProps["volumes"] = types.ListNull(types.ObjectType{AttrTypes: volumeAttr})
+	}
+	return containerProps
+}
+
+func frameworkFlattenContainerLinuxParameters(ctx context.Context, lp *batchtypes.LinuxParameters) map[string]attr.Value {
+	linuxProps := map[string]attr.Value{
+		"init_process_enabled": flex.BoolToFramework(ctx, lp.InitProcessEnabled),
+		"max_swap":             flex.Int32ToFramework(ctx, lp.MaxSwap),
+		"shared_memory_size":   flex.Int32ToFramework(ctx, lp.SharedMemorySize),
+		"swappiness":           flex.Int32ToFramework(ctx, lp.Swappiness),
+	}
+	if len(lp.Devices) > 0 {
+		linuxProps["devices"] = types.ListValueMust(types.ObjectType{AttrTypes: deviceAttr}, frameworkFlattenContainerDevices(ctx, lp.Devices))
+	} else {
+		linuxProps["devices"] = types.ListNull(types.ObjectType{AttrTypes: deviceAttr})
+	}
+	if len(lp.Tmpfs) > 0 {
+		linuxProps["tmpfs"] = types.ListValueMust(types.ObjectType{AttrTypes: tmpfsAttr}, flattenContainerTmpfs(ctx, lp.Tmpfs))
+	} else {
+		linuxProps["tmpfs"] = types.ListNull(types.ObjectType{AttrTypes: tmpfsAttr})
+	}
+	return linuxProps
+}
+
+func frameworkFlattenContainerDevices(ctx context.Context, devices []batchtypes.Device) (data []attr.Value) {
+	for _, dev := range devices {
+		var perms []string
+		for _, perm := range dev.Permissions {
+			perms = append(perms, string(perm))
+		}
+		data = append(data, types.ObjectValueMust(deviceAttr, map[string]attr.Value{
+			"host_path":      flex.StringToFramework(ctx, dev.HostPath),
+			"container_path": flex.StringToFramework(ctx, dev.ContainerPath),
+			"permissions":    flex.FlattenFrameworkStringList(ctx, aws.StringSlice(perms)),
+		}))
+	}
+	return
+}
+
+func flattenContainerTmpfs(ctx context.Context, tmpfs []batchtypes.Tmpfs) (data []attr.Value) {
+	for _, tmp := range tmpfs {
+		data = append(data, types.ObjectValueMust(tmpfsAttr, map[string]attr.Value{
+			"container_path": flex.StringToFramework(ctx, tmp.ContainerPath),
+			"size":           flex.Int32ToFramework(ctx, tmp.Size),
+			"mount_options":  flex.FlattenFrameworkStringList(ctx, aws.StringSlice(tmp.MountOptions)),
+		}))
+	}
+	return
+}
+
+func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStrategy, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) {
+	att := fwtypes.AttributeTypesMust[retryStrategy](ctx)
+	att["evaluate_on_exit"] = types.ListType{ElemType:
types.ObjectType{AttrTypes: evaluateOnExitAttr}} + if jd == nil { + data.RetryStrategy = types.ObjectNull(att) + return + } + + var elems []attr.Value + for _, apiObject := range jd.EvaluateOnExit { + obj := map[string]attr.Value{ + "action": flex.StringToFramework(ctx, aws.String(string(apiObject.Action))), + "on_exit_code": flex.StringToFramework(ctx, apiObject.OnExitCode), + "on_reason": flex.StringToFramework(ctx, apiObject.OnReason), + "on_status_reason": flex.StringToFramework(ctx, apiObject.OnStatusReason), + } + elems = append(elems, types.ObjectValueMust(evaluateOnExitAttr, obj)) + } + + if elems == nil { + data.RetryStrategy = types.ObjectValueMust(att, map[string]attr.Value{ + "attempts": flex.Int32ToFramework(ctx, jd.Attempts), + "evaluate_on_exit": types.ListNull(types.ObjectType{AttrTypes: evaluateOnExitAttr}), + }) + } else { + data.RetryStrategy = types.ObjectValueMust(att, map[string]attr.Value{ + "attempts": flex.Int32ToFramework(ctx, jd.Attempts), + "evaluate_on_exit": types.ListValueMust(types.ObjectType{AttrTypes: evaluateOnExitAttr}, elems), + }) + } + return +} + +type dataSourceJobDefinitionData struct { + ARN fwtypes.ARN `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Revision types.Int64 `tfsdk:"revision"` + Status types.String `tfsdk:"status"` + Tags types.Map `tfsdk:"tags"` + Type types.String `tfsdk:"type"` + ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` + SchedulingPriority types.Int64 `tfsdk:"scheduling_priority"` + RetryStrategy types.Object `tfsdk:"retry_strategy"` + Timeout types.Object `tfsdk:"timeout"` + NodeProperties types.Object `tfsdk:"node_properties"` + EksProperties types.Object `tfsdk:"eks_properties"` +} + +type retryStrategy struct { + Attempts types.Int64 `tfsdk:"attempts"` + EvaluateOnExit types.Object `tfsdk:"evaluate_on_exit"` +} + +var timeoutAttr = map[string]attr.Type{ + "attempt_duration_seconds": types.Int64Type, +} + +var eksPropertiesAttr = map[string]attr.Type{ + "pod_properties": types.ObjectType{AttrTypes: eksPodPropertiesAttr}, +} + +var eksPodPropertiesAttr = map[string]attr.Type{ + "containers": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerAttr}}, + "dns_policy": types.StringType, + "host_network": types.BoolType, + "metadata": types.ObjectType{AttrTypes: eksMetadataAttr}, + "service_account_name": types.StringType, + "volumes": types.ListType{ElemType: types.ObjectType{AttrTypes: eksVolumeAttr}}, +} + +var eksContainerAttr = map[string]attr.Type{ + "args": types.ListType{ElemType: types.StringType}, + "commands": types.ListType{ElemType: types.StringType}, + "env": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}}, + "image": types.StringType, + "image_pull_policy": types.StringType, + "name": types.StringType, + "resources": types.ObjectType{AttrTypes: eksContainerResourceRequirementsAttr}, + "security_context": types.ObjectType{AttrTypes: eksContainerSecurityContextAttr}, + "volume_mounts": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}}, +} + +var eksContainerEnvironmentVariableAttr = map[string]attr.Type{ + "name": types.StringType, + "value": types.StringType, +} + +var eksContainerResourceRequirementsAttr = map[string]attr.Type{ + "limits": types.MapType{ElemType: types.StringType}, + "requests": types.MapType{ElemType: types.StringType}, +} + +var eksContainerSecurityContextAttr = map[string]attr.Type{ + "privileged": types.BoolType, + "run_as_user": 
types.Int64Type, + "run_as_group": types.Int64Type, + "run_as_non_root": types.BoolType, + "read_only_root_filesystem": types.BoolType, +} + +var eksContainerVolumeMountAttr = map[string]attr.Type{ + "mount_path": types.StringType, + "name": types.StringType, + "read_only": types.BoolType, +} + +var eksMetadataAttr = map[string]attr.Type{ + "labels": types.MapType{ElemType: types.StringType}, +} + +var eksVolumeAttr = map[string]attr.Type{ + "name": types.StringType, + "empty_dir": types.ObjectType{AttrTypes: eksVolumeEmptyDirAttr}, + "host_path": types.ObjectType{AttrTypes: eksVolumeHostPathAttr}, + "secret": types.ObjectType{AttrTypes: eksVolumeSecretAttr}, +} + +var eksVolumeEmptyDirAttr = map[string]attr.Type{ + "medium": types.StringType, + "size_limit": types.Int64Type, +} + +var eksVolumeHostPathAttr = map[string]attr.Type{ + "path": types.StringType, +} + +var eksVolumeSecretAttr = map[string]attr.Type{ + "secret_name": types.StringType, + "optional": types.BoolType, +} + +type frameworkNodeProperties struct { + MainNode types.Int64 `tfsdk:"main_node"` + NodeRangeProperties types.List `tfsdk:"node_range_properties"` + NumNodes types.Int64 `tfsdk:"num_nodes"` +} + +var evaluateOnExitAttr = map[string]attr.Type{ + "action": types.StringType, + "on_exit_code": types.StringType, + "on_reason": types.StringType, + "on_status_reason": types.StringType, +} + +var nodeRangePropertiesAttr = map[string]attr.Type{ + "container": types.ObjectType{AttrTypes: containerPropertiesAttr}, + "target_nodes": types.StringType, +} + +var containerPropertiesAttr = map[string]attr.Type{ + "command": types.ListType{ElemType: types.StringType}, + "environment": types.ListType{ElemType: types.ObjectType{AttrTypes: keyValuePairAttr}}, + "ephemeral_storage": types.ObjectType{AttrTypes: ephemeralStorageAttr}, + "execution_role_arn": types.StringType, + "fargate_platform_configuration": types.ObjectType{AttrTypes: fargatePlatformConfigurationAttr}, + "image": types.StringType, + "instance_type": types.StringType, + "job_role_arn": types.StringType, + "linux_parameters": types.ObjectType{AttrTypes: linuxParametersAttr}, + "log_configuration": types.ObjectType{AttrTypes: logConfigurationAttr}, + "mount_points": types.ListType{ElemType: types.ObjectType{AttrTypes: mountPointAttr}}, + "network_configuration": types.ObjectType{AttrTypes: networkConfigurationAttr}, + "privileged": types.BoolType, + "readonly_root_filesystem": types.BoolType, + "resource_requirements": types.ListType{ElemType: types.ObjectType{AttrTypes: resourceRequirementsAttr}}, + "runtime_platform": types.ObjectType{AttrTypes: runtimePlatformAttr}, + "secrets": types.ListType{ElemType: types.ObjectType{AttrTypes: secretAttr}}, + "ulimits": types.ListType{ElemType: types.ObjectType{AttrTypes: ulimitsAttr}}, + "user": types.StringType, + "volumes": types.ListType{ElemType: types.ObjectType{AttrTypes: volumeAttr}}, +} + +var keyValuePairAttr = map[string]attr.Type{ + "name": types.StringType, + "value": types.StringType, +} + +var ephemeralStorageAttr = map[string]attr.Type{ + "size_in_gib": types.Int64Type, +} + +var fargatePlatformConfigurationAttr = map[string]attr.Type{ + "platform_version": types.StringType, +} + +var linuxParametersAttr = map[string]attr.Type{ + "devices": types.ListType{ElemType: types.ObjectType{AttrTypes: deviceAttr}}, + "init_process_enabled": types.BoolType, + "max_swap": types.Int64Type, + "shared_memory_size": types.Int64Type, + "swappiness": types.Int64Type, + "tmpfs": types.ListType{ElemType: types.ObjectType{AttrTypes: 
tmpfsAttr}}, +} + +var logConfigurationAttr = map[string]attr.Type{ + "options": types.MapType{ElemType: types.StringType}, + "secret_options": types.ListType{ElemType: types.ObjectType{AttrTypes: secretAttr}}, + "log_driver": types.StringType, +} +var tmpfsAttr = map[string]attr.Type{ + "container_path": types.StringType, + "mount_options": types.ListType{ElemType: types.StringType}, + "size": types.Int64Type, +} + +var deviceAttr = map[string]attr.Type{ + "container_path": types.StringType, + "host_path": types.StringType, + "permissions": types.ListType{ElemType: types.StringType}, +} + +var mountPointAttr = map[string]attr.Type{ + "container_path": types.StringType, + "read_only": types.BoolType, + "source_volume": types.StringType, +} + +var networkConfigurationAttr = map[string]attr.Type{ + "assign_public_ip": types.StringType, +} + +var resourceRequirementsAttr = map[string]attr.Type{ + "type": types.StringType, + "value": types.StringType, +} + +var runtimePlatformAttr = map[string]attr.Type{ + "cpu_architecture": types.StringType, + "operating_system_family": types.StringType, +} + +var secretAttr = map[string]attr.Type{ + "name": types.StringType, + "value_from": types.StringType, +} + +var ulimitsAttr = map[string]attr.Type{ + "hard_limit": types.Int64Type, + "name": types.StringType, + "soft_limit": types.Int64Type, +} + +var volumeAttr = map[string]attr.Type{ + "efs_volume_configuration": types.ObjectType{AttrTypes: efsVolumeConfigurationAttr}, + "host": types.ObjectType{AttrTypes: hostAttr}, + "name": types.StringType, +} + +var efsVolumeConfigurationAttr = map[string]attr.Type{ + "authorization_config": types.ObjectType{AttrTypes: authorizationConfigAttr}, + "file_system_id": types.StringType, + "root_directory": types.StringType, + "transit_encryption": types.StringType, + "transit_encryption_port": types.Int64Type, +} + +var authorizationConfigAttr = map[string]attr.Type{ + "access_point_id": types.StringType, + "iam": types.StringType, +} + +var hostAttr = map[string]attr.Type{ + "source_path": types.StringType, +} diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go new file mode 100644 index 00000000000..5a1f91ba613 --- /dev/null +++ b/internal/service/batch/job_definition_data_source_test.go @@ -0,0 +1,257 @@ +// Copyright (c) HashiCorp, Inc. 
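The attr.Type maps closing out job_definition_data_source.go above drive every types.ObjectValueMust call in the flatteners. The contract is strict: the value map must supply exactly the keys declared in the type map, with matching value types, or the call panics at runtime instead of returning diagnostics. A minimal sketch of the pattern using the file's ulimitsAttr; exampleUlimitObject is an illustrative name, not part of this patch:

```go
package batch

import (
	"github.com/hashicorp/terraform-plugin-framework/attr"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// exampleUlimitObject builds a types.Object from ulimitsAttr. Omitting a key,
// adding an extra one, or using the wrong attr.Value type panics, which is
// why the flatteners fill every branch (ObjectNull/ListNull in the else arms).
func exampleUlimitObject() types.Object {
	return types.ObjectValueMust(ulimitsAttr, map[string]attr.Value{
		"hard_limit": types.Int64Value(4096),
		"name":       types.StringValue("nofile"),
		"soft_limit": types.Int64Value(1024),
	})
}
```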
+// SPDX-License-Identifier: MPL-2.0 + +package batch_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/batch/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfbatch "github.com/hashicorp/terraform-provider-aws/internal/service/batch" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { + ctx := acctest.Context(t) + + var jd types.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + resourceName := "aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicName(rName, "1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), + acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), + ), + }, + { + // specify revision + Config: testAccJobDefinitionDataSourceConfig_basicNameRevision(rName, "2", 2), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { + ctx := acctest.Context(t) + + var jd types.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), + acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), + ), + }, + { + Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + 
resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { + ctx := acctest.Context(t) + + var jd types.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARNNode(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.main_node", "0"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.#", "2"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.0.container.image", "busybox"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { + ctx := acctest.Context(t) + + var jd types.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARNEKS(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), + resource.TestCheckResourceAttr(dataSourceName, "type", "container"), + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.0.image", "public.ecr.aws/amazonlinux/amazonlinux:1"), + ), + }, + }, + }) +} + +func testAccCheckJobDefinitionV2Exists(ctx context.Context, n string, jd *types.JobDefinition) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Batch Job Queue ID is set") + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).BatchClient(ctx) + + jobDefinition, err := tfbatch.FindJobDefinitionV2ByARN(ctx, conn, rs.Primary.ID) + + if err != nil { + return err + } + + *jd = *jobDefinition + + return nil + } +} + +func testAccJobDefinitionDataSourceConfig_basicARN(rName string, increment string) string { + return acctest.ConfigCompose( + testAccJobDefinitionDataSourceConfig_container(rName, increment), + ` +data "aws_batch_job_definition" "test" { + arn = aws_batch_job_definition.test.arn + + depends_on = [aws_batch_job_definition.test] +} +`) +} + +func testAccJobDefinitionDataSourceConfig_basicName(rName string, increment string) string { + return acctest.ConfigCompose( + testAccJobDefinitionDataSourceConfig_container(rName, increment), + 
+		fmt.Sprintf(`
+data "aws_batch_job_definition" "test" {
+  name = %[1]q
+
+  depends_on = [aws_batch_job_definition.test]
+}
+`, rName))
+}
+
+func testAccJobDefinitionDataSourceConfig_basicNameRevision(rName string, increment string, revision int) string {
+	return acctest.ConfigCompose(
+		testAccJobDefinitionDataSourceConfig_container(rName, increment),
+		fmt.Sprintf(`
+data "aws_batch_job_definition" "test" {
+  name     = %[1]q
+  revision = %[2]d
+
+  depends_on = [aws_batch_job_definition.test]
+}
+`, rName, revision))
+}
+
+func testAccJobDefinitionDataSourceConfig_container(rName string, increment string) string {
+	return fmt.Sprintf(`
+resource "aws_batch_job_definition" "test" {
+  container_properties = jsonencode({
+    command = ["echo", "test%[2]s"]
+    image   = "busybox"
+    memory  = 128
+    vcpus   = 1
+  })
+  name = %[1]q
+  type = "container"
+  retry_strategy {
+    attempts = 10
+  }
+}
+`, rName, increment)
+}
+
+func testAccJobDefinitionDataSourceConfig_basicARNNode(rName string) string {
+	return acctest.ConfigCompose(
+		testAccJobDefinitionConfig_NodeProperties(rName), `
+data "aws_batch_job_definition" "test" {
+  arn        = aws_batch_job_definition.test.arn
+  depends_on = [aws_batch_job_definition.test]
+}`,
+	)
+}
+
+func testAccJobDefinitionDataSourceConfig_basicARNEKS(rName string) string {
+	return acctest.ConfigCompose(
+		testAccJobDefinitionConfig_EKSProperties_basic(rName), `
+data "aws_batch_job_definition" "test" {
+  arn        = aws_batch_job_definition.test.arn
+  depends_on = [aws_batch_job_definition.test]
+}`,
+	)
+}
diff --git a/internal/service/batch/service_package_gen.go b/internal/service/batch/service_package_gen.go
index f1eae349c43..560615d2be7 100644
--- a/internal/service/batch/service_package_gen.go
+++ b/internal/service/batch/service_package_gen.go
@@ -5,6 +5,8 @@ package batch
 import (
 	"context"
 
+	aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws"
+	batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch"
 	aws_sdkv1 "github.com/aws/aws-sdk-go/aws"
 	session_sdkv1 "github.com/aws/aws-sdk-go/aws/session"
 	batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch"
@@ -16,7 +18,12 @@ import (
 type servicePackage struct{}
 
 func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource {
-	return []*types.ServicePackageFrameworkDataSource{}
+	return []*types.ServicePackageFrameworkDataSource{
+		{
+			Factory: newDataSourceJobDefinition,
+			Name:    "Job Definition",
+		},
+	}
 }
 
 func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource {
@@ -88,6 +95,17 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*b
 	return batch_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil
 }
 
+// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API.
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*batch_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) + + return batch_sdkv2.NewFromConfig(cfg, func(o *batch_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil +} + func ServicePackage(ctx context.Context) conns.ServicePackage { return &servicePackage{} } diff --git a/names/data/names_data.csv b/names/data/names_data.csv index d84ac131e16..ff31c70bed9 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -34,7 +34,7 @@ autoscaling-plans,autoscalingplans,autoscalingplans,autoscalingplans,,autoscalin ,,,,,,,,,,,,,,,,,Backint Agent for SAP HANA,AWS,x,,,,,,,,,No SDK support backup,backup,backup,backup,,backup,,,Backup,Backup,,1,,,aws_backup_,,backup_,Backup,AWS,,,,,,,Backup,ListBackupPlans,, backup-gateway,backupgateway,backupgateway,backupgateway,,backupgateway,,,BackupGateway,BackupGateway,,1,,,aws_backupgateway_,,backupgateway_,Backup Gateway,AWS,,x,,,,,Backup Gateway,,, -batch,batch,batch,batch,,batch,,,Batch,Batch,,1,,,aws_batch_,,batch_,Batch,AWS,,,,,,,Batch,ListJobs,, +batch,batch,batch,batch,,batch,,,Batch,Batch,,1,2,,aws_batch_,,batch_,Batch,AWS,,,,,,,Batch,ListJobs,, bedrock,bedrock,bedrock,bedrock,,bedrock,,,Bedrock,Bedrock,,,2,,aws_bedrock_,,bedrock_,Amazon Bedrock,Amazon,,,,,,,Bedrock,ListFoundationModels,, bedrock-agent,bedrockagent,bedrockagent,bedrockagent,,bedrockagent,,,BedrockAgent,BedrockAgent,,,2,,aws_bedrockagent_,,bedrock_agent_,Agents for Amazon Bedrock,Amazon,,,,,,,Bedrock Agent,ListAgents,, billingconductor,billingconductor,billingconductor,,,billingconductor,,,BillingConductor,BillingConductor,,1,,,aws_billingconductor_,,billingconductor_,Billing Conductor,AWS,,x,,,,,billingconductor,,, @@ -388,4 +388,4 @@ workspaces-web,workspacesweb,workspacesweb,workspacesweb,,workspacesweb,,,WorkSp xray,xray,xray,xray,,xray,,,XRay,XRay,,,2,,aws_xray_,,xray_,X-Ray,AWS,,,,,,,XRay,ListResourcePolicies,, verifiedpermissions,verifiedpermissions,verifiedpermissions,verifiedpermissions,,verifiedpermissions,,,VerifiedPermissions,VerifiedPermissions,,,2,,aws_verifiedpermissions_,,verifiedpermissions_,Verified Permissions,Amazon,,,,,,,VerifiedPermissions,ListPolicyStores,, codecatalyst,codecatalyst,codecatalyst,codecatalyst,,codecatalyst,,,CodeCatalyst,CodeCatalyst,,,2,,aws_codecatalyst_,,codecatalyst_,CodeCatalyst,Amazon,,,,,,,CodeCatalyst,ListAccessTokens,, -mediapackagev2,mediapackagev2,mediapackagev2,mediapackagev2,,mediapackagev2,,,MediaPackageV2,MediaPackageV2,,,2,aws_media_packagev2_,aws_mediapackagev2_,,media_packagev2_,Elemental MediaPackage Version 2,AWS,,,,,,,MediaPackageV2,ListChannelGroups,, \ No newline at end of file +mediapackagev2,mediapackagev2,mediapackagev2,mediapackagev2,,mediapackagev2,,,MediaPackageV2,MediaPackageV2,,,2,aws_media_packagev2_,aws_mediapackagev2_,,media_packagev2_,Elemental MediaPackage Version 2,AWS,,,,,,,MediaPackageV2,ListChannelGroups,, diff --git a/names/names.go b/names/names.go index a101772e233..a8515b8d88e 100644 --- a/names/names.go +++ b/names/names.go @@ -33,6 +33,7 @@ const ( AppRunnerEndpointID = "apprunner" AthenaEndpointID = "athena" AuditManagerEndpointID = "auditmanager" + BatchEndpointID = "batch" BedrockEndpointID = "bedrock" BudgetsEndpointID = "budgets" ChimeSDKVoiceEndpointID = "voice-chime" diff --git a/website/docs/d/batch_job_definition.html.markdown b/website/docs/d/batch_job_definition.html.markdown new 
file mode 100644
index 00000000000..b45a798013f
--- /dev/null
+++ b/website/docs/d/batch_job_definition.html.markdown
@@ -0,0 +1,274 @@
+---
+subcategory: "Batch"
+layout: "aws"
+page_title: "AWS: aws_batch_job_definition"
+description: |-
+  Terraform data source for managing an AWS Batch Job Definition.
+---
+
+# Data Source: aws_batch_job_definition
+
+Terraform data source for managing an AWS Batch Job Definition.
+
+## Example Usage
+
+### Lookup via ARN
+
+```terraform
+data "aws_batch_job_definition" "arn" {
+  arn = "arn:aws:batch:us-east-1:012345678910:job-definition/example"
+}
+```
+
+### Lookup via Name
+
+```terraform
+data "aws_batch_job_definition" "name" {
+  name     = "example"
+  revision = 2
+}
+```
+
+## Argument Reference
+
+The following arguments are optional:
+
+* `arn` - ARN of the Job Definition.
+* `revision` - The revision of the job definition.
+* `name` - The name of the job definition to look up. It can be up to 128 letters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).
+* `status` - The status of the job definition.
+
+## Attribute Reference
+
+This data source exports the following attributes in addition to the arguments above:
+
+* `container_orchestration_type` - The orchestration type of the compute environment.
+* `scheduling_priority` - The scheduling priority for jobs that are submitted with this job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority.
+* `id` - The ARN of the job definition.
+* `eks_properties` - An [object](#eks_properties) with various properties that are specific to Amazon EKS based jobs. This must not be specified for Amazon ECS based job definitions.
+* `node_properties` - An [object](#node_properties) with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the AWS Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.
+* `retry_strategy` - The [retry strategy](#retry_strategy) to use for failed jobs that are submitted with this job definition. Any retry strategy that's specified during a SubmitJob operation overrides the retry strategy defined here. If a job is terminated due to a timeout, it isn't retried.
+* `timeout` - The [timeout configuration](#timeout) for jobs that are submitted with this job definition, after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds.
+
+### eks_properties
+
+* `pod_properties` - The [properties](#pod_properties) for the Kubernetes pod resources of a job.
+
+### pod_properties
+
+* `dns_policy` - The DNS policy for the pod. The default value is ClusterFirst. If the hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node.
+* `host_network` - Indicates if the pod uses the host's network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections.
+* `service_account_name` - The name of the service account that's used to run the pod.
+* `containers` - The properties of the container that's used on the Amazon EKS pod. Array of [EksContainer](#eks_container) objects.
+* `metadata` - [Metadata](#eks_metadata) about the Kubernetes pod.
+* `volumes` - Specifies the volumes for a job definition that uses Amazon EKS resources. Array of [EksVolume](#eks_volumes) objects.
+
+### eks_container
+
+* `args` - An array of arguments to the entrypoint.
+* `commands` - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment.
+* `env` - The environment variables to pass to a container. Array of [EksContainerEnvironmentVariable](#eks_environment) objects.
+* `image` - The Docker image used to start the container.
+* `image_pull_policy` - The image pull policy for the container.
+* `name` - The name of the container.
+* `resources` - The type and amount of [resources](#eks_resources) to assign to a container.
+* `security_context` - The [security context](#eks_security_context) for a job.
+* `volume_mounts` - The [volume mounts](#eks_volume_mounts) for the container.
+
+### eks_metadata
+
+* `labels` - Key-value pairs used to identify, sort, and organize Kubernetes resources.
+
+### eks_volumes
+
+* `name` - The name of the volume. The name must be allowed as a DNS subdomain name.
+* `empty_dir` - Specifies the configuration of a Kubernetes [emptyDir volume](#eks_volume_empty_dir).
+* `host_path` - Specifies the configuration of a Kubernetes [hostPath volume](#eks_volume_host_path).
+* `secret` - Specifies the configuration of a Kubernetes [secret volume](#eks_volume_secret).
+
+### eks_volume_empty_dir
+
+* `medium` - The medium to store the volume.
+* `size_limit` - The maximum size of the volume. By default, there's no maximum size defined.
+
+### eks_volume_host_path
+
+* `path` - The path of the file or directory on the host to mount into containers on the pod.
+
+### eks_volume_secret
+
+* `secret_name` - The name of the secret. The name must be allowed as a DNS subdomain name.
+* `optional` - Specifies whether the secret or the secret's keys must be defined.
+
+### eks_environment
+
+* `name` - The name of the environment variable.
+* `value` - The value of the environment variable.
+
+### eks_resources
+
+* `limits` - The type and quantity of the resources to reserve for the container.
+* `requests` - The type and quantity of the resources to request for the container.
+
+### eks_security_context
+
+* `privileged` - When this parameter is true, the container is given elevated permissions on the host container instance. The level of permissions are similar to the root user permissions. The default value is false.
+* `read_only_root_filesystem` - When this parameter is true, the container is given read-only access to its root file system. The default value is false.
+* `run_as_user` - When this parameter is specified, the container is run as the specified user ID (uid). If this parameter isn't specified, the default is the user that's specified in the image metadata.
+* `run_as_group` - When this parameter is specified, the container is run as the specified group ID (gid). If this parameter isn't specified, the default is the group that's specified in the image metadata.
+* `run_as_non_root` - When this parameter is specified, the container is run as a user with a uid other than 0. If this parameter isn't specified, no such rule is enforced.
+
+### eks_volume_mounts
+
+* `mount_path` - The path on the container where the volume is mounted.
+* `name` - The name of the volume mount.
+* `read_only` - If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume.
+
+### node_properties
+
+* `main_node` - Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer than the number of nodes.
+* `node_range_properties` - A list of node ranges and their [properties](#node_range_properties) that are associated with a multi-node parallel job.
+* `num_nodes` - The number of nodes that are associated with a multi-node parallel job.
+
+### node_range_properties
+
+* `target_nodes` - The range of nodes, using node index values. A range of 0:3 indicates nodes with index values of 0 through 3.
+* `container` - The [container details](#container) for the node range.
+
+### container
+
+* `command` - The command that's passed to the container.
+* `environment` - The [environment](#environment) variables to pass to a container.
+* `ephemeral_storage` - The amount of [ephemeral storage](#ephemeral_storage) to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate.
+* `execution_role_arn` - The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role.
+* `fargate_platform_configuration` - The [platform configuration](#fargate_platform_configuration) for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.
+* `image` - The image used to start a container.
+* `instance_type` - The instance type to use for a multi-node parallel job.
+* `job_role_arn` - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
+* `linux_parameters` - [Linux-specific modifications](#linux_parameters) that are applied to the container.
+* `log_configuration` - The [log configuration](#log_configuration) specification for the container.
+* `mount_points` - The [mount points](#mount_points) for data volumes in your container.
+* `network_configuration` - The [network configuration](#network_configuration) for jobs that are running on Fargate resources.
+* `privileged` - When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user).
+* `readonly_root_filesystem` - When this parameter is true, the container is given read-only access to its root file system.
+* `resource_requirements` - The type and amount of [resources](#resource_requirements) to assign to a container.
+* `runtime_platform` - An [object](#runtime_platform) that represents the compute environment architecture for AWS Batch jobs on Fargate.
+* `secrets` - The [secrets](#secrets) for the container.
+* `ulimits` - A list of [ulimits](#ulimits) to set in the container.
+* `user` - The user name to use inside the container.
+* `volumes` - A list of data [volumes](#volumes) used in a job.
+
+### environment
+
+* `name` - The name of the key-value pair.
+* `value` - The value of the key-value pair.
+
+### ephemeral_storage
+
+* `size_in_gb` - The total amount, in GiB, of ephemeral storage to set for the task.
+
+### fargate_platform_configuration
+
+* `platform_version` - The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources.
+
+### linux_parameters
+
+* `init_process_enabled` - If true, run an init process inside the container that forwards signals and reaps processes.
+* `max_swap` - The total amount of swap memory (in MiB) a container can use.
+* `shared_memory_size` - The value for the size (in MiB) of the `/dev/shm` volume.
+* `swappiness` - You can use this parameter to tune a container's memory swappiness behavior.
+* `devices` - Any of the [host devices](#devices) to expose to the container.
+* `tmpfs` - The container path, mount options, and size (in MiB) of the [tmpfs](#tmpfs) mount.
+
+### log_configuration
+
+* `options` - The configuration options to send to the log driver.
+* `log_driver` - The log driver to use for the container.
+* `secret_options` - The secrets to pass to the log configuration.
+
+### network_configuration
+
+* `assign_public_ip` - Indicates whether the job has a public IP address.
+
+### mount_points
+
+* `container_path` - The path on the container where the host volume is mounted.
+* `read_only` - If this value is true, the container has read-only access to the volume.
+* `source_volume` - The name of the volume to mount.
+
+### resource_requirements
+
+* `type` - The type of resource to assign to a container. The supported resources include `GPU`, `MEMORY`, and `VCPU`.
+* `value` - The quantity of the specified resource to reserve for the container.
+
+### secrets
+
+* `name` - The name of the secret.
+* `value_from` - The secret to expose to the container.
+
+### ulimits
+
+* `hard_limit` - The hard limit for the ulimit type.
+* `name` - The type of the ulimit.
+* `soft_limit` - The soft limit for the ulimit type.
+
+### runtime_platform
+
+* `cpu_architecture` - The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.
+* `operating_system_family` - The operating system for the compute environment.
+
+### secret_options
+
+* `name` - The name of the secret.
+* `value_from` - The secret to expose to the container. The supported values are either the full Amazon Resource Name (ARN) of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.
+
+### devices
+
+* `host_path` - The path for the device on the host container instance.
+* `container_path` - The path inside the container that's used to expose the host device. By default, the hostPath value is used.
+* `permissions` - The explicit permissions to provide to the container for the device.
+
+### tmpfs
+
+* `container_path` - The absolute file path in the container where the tmpfs volume is mounted.
+* `size` - The size (in MiB) of the tmpfs volume.
+* `mount_options` - The list of tmpfs volume mount options.
+
+### volumes
+
+* `name` - The name of the volume.
+* `host` - The contents of the host parameter determine whether your data volume persists on the host container instance and where it's stored.
+* `efs_volume_configuration` - This [parameter](#efs_volume_configuration) is specified when you're using an Amazon Elastic File System file system for job storage.
+
+### host
+
+* `source_path` - The path on the host container instance that's presented to the container.
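+
+As an illustration of how these nested attributes can be consumed elsewhere in a configuration, here is a minimal sketch for a multi-node job definition; the definition name `example` and the output names are hypothetical:
+
+```terraform
+data "aws_batch_job_definition" "example" {
+  name = "example"
+}
+
+# Container image used by the first node range of the multi-node job definition.
+output "first_container_image" {
+  value = data.aws_batch_job_definition.example.node_properties.node_range_properties[0].container.image
+}
+
+# Names of the data volumes declared by that container.
+output "volume_names" {
+  value = [for v in data.aws_batch_job_definition.example.node_properties.node_range_properties[0].container.volumes : v.name]
+}
+```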
+ +### efs_volume_configuration + +* `file_system_id` - The Amazon EFS file system ID to use. +* `root_directory` - The directory within the Amazon EFS file system to mount as the root directory inside the host. +* `transit_encryption` - Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server +* `transit_encryption_port` - The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. +* `authorization_config` - The [authorization configuration](#authorization_config) details for the Amazon EFS file system. + +### authorization_config + +* `access_point_id` - The Amazon EFS access point ID to use. +* `iam` - Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system. + +### retry_strategy + +* `attempts` - The number of times to move a job to the RUNNABLE status. +* `evaluate_on_exit` - Array of up to 5 [objects](#evaluate_on_exit) that specify the conditions where jobs are retried or failed. + +### evaluate_on_exit + +* `action` - Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values aren't case sensitive. +* `on_exit_code` - Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. +* `on_reason` - Contains a glob pattern to match against the Reason returned for a job. +* `on_status_reason` - Contains a glob pattern to match against the StatusReason returned for a job. + +### timeout + +* `attempt_duration_seconds` - The job timeout time (in seconds) that's measured from the job attempt's startedAt timestamp. From 62e943a66c3e868b3d511198c938fe2a1015b9e1 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Tue, 19 Dec 2023 19:12:52 -0500 Subject: [PATCH 02/17] CHANGELOG --- .changelog/31663.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/31663.txt diff --git a/.changelog/31663.txt b/.changelog/31663.txt new file mode 100644 index 00000000000..1cdf96f375a --- /dev/null +++ b/.changelog/31663.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +batch_definition +``` From cf2e27437fafec34d9aa3f53179ab33db6ea4989 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Tue, 19 Dec 2023 19:14:19 -0500 Subject: [PATCH 03/17] CHANGELOG --- .changelog/{31663.txt => 34663.txt} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .changelog/{31663.txt => 34663.txt} (63%) diff --git a/.changelog/31663.txt b/.changelog/34663.txt similarity index 63% rename from .changelog/31663.txt rename to .changelog/34663.txt index 1cdf96f375a..7caf0fb2e24 100644 --- a/.changelog/31663.txt +++ b/.changelog/34663.txt @@ -1,3 +1,3 @@ ```release-note:new-data-source -batch_definition +batch_job_definition ``` From 32a563a6639357af37ca3906ba83d76f07c91e61 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Wed, 20 Dec 2023 08:37:00 -0500 Subject: [PATCH 04/17] Add missing annotation for framework data source --- internal/service/batch/job_definition_data_source.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 5a09bbd2c38..180ba52c6a4 100644 --- a/internal/service/batch/job_definition_data_source.go +++ 
b/internal/service/batch/job_definition_data_source.go @@ -29,6 +29,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +// @FrameworkDataSource(name="Job Definition") func newDataSourceJobDefinition(context.Context) (datasource.DataSourceWithConfigure, error) { return &dataSourceJobDefinition{}, nil } From 1b09676034b97ebcff12ef899dd02ba77d496a02 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Wed, 20 Dec 2023 09:22:12 -0500 Subject: [PATCH 05/17] :broom: linting --- .../batch/job_definition_data_source.go | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 180ba52c6a4..64034de64c7 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -676,19 +676,35 @@ func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.Ek } if len(apiObject.PodProperties.Containers) > 0 { - props["containers"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerAttr}, frameworkFlattenEKSContainer(ctx, apiObject.PodProperties.Containers)) + container, d := types.ListValue(types.ObjectType{AttrTypes: eksContainerAttr}, frameworkFlattenEKSContainer(ctx, apiObject.PodProperties.Containers)) + diags.Append(d...) + if diags.HasError() { + return diags + } + props["containers"] = container } else { props["containers"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerAttr}) } if len(apiObject.PodProperties.Volumes) > 0 { - props["volumes"] = types.ListValueMust(types.ObjectType{AttrTypes: eksVolumeAttr}, frameworkFlattenEKSVolume(ctx, apiObject.PodProperties.Volumes)) + volume, d := types.ListValue(types.ObjectType{AttrTypes: eksVolumeAttr}, frameworkFlattenEKSVolume(ctx, apiObject.PodProperties.Volumes)) + diags.Append(d...) + if diags.HasError() { + return diags + } + props["volumes"] = volume } else { props["volumes"] = types.ListNull(types.ObjectType{AttrTypes: eksVolumeAttr}) } + + podProps, d := types.ObjectValue(eksPodPropertiesAttr, props) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } data.EksProperties = types.ObjectValueMust(eksPropertiesAttr, map[string]attr.Value{ - "pod_properties": types.ObjectValueMust(eksPodPropertiesAttr, props), + "pod_properties": podProps, }) - return + return diags } func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.EksContainer) []attr.Value { @@ -800,7 +816,7 @@ func frameworkFlattenEKSVolume(ctx context.Context, apiObject []batchtypes.EksVo } volumes = append(volumes, types.ObjectValueMust(eksVolumeAttr, volume)) } - return + return volumes } func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []batchtypes.EksContainerVolumeMount) (volumeMounts []attr.Value) { @@ -1078,7 +1094,7 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStra "evaluate_on_exit": types.ListValueMust(types.ObjectType{AttrTypes: evaluateOnExitAttr}, elems), }) } - return + return diags } type dataSourceJobDefinitionData struct { From d6dbd981a4594f790d95a7a53dc69d12c94e124c Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Wed, 20 Dec 2023 09:35:59 -0500 Subject: [PATCH 06/17] semgrep ignore --- internal/service/batch/job_definition_data_source.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 64034de64c7..e384b470ba7 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -571,6 +571,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR ) return } + // nosemgrep:ci.semgrep.aws.prefer-pointer-conversion-assignment jd = *out } From 0d685d9c9e5c7ce585760729eceff1bd1dc52a09 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Wed, 20 Dec 2023 10:23:36 -0500 Subject: [PATCH 07/17] more linting --- internal/service/batch/findv2.go | 4 +-- .../batch/job_definition_data_source.go | 29 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/internal/service/batch/findv2.go b/internal/service/batch/findv2.go index 812ef300cd6..439af4f2894 100644 --- a/internal/service/batch/findv2.go +++ b/internal/service/batch/findv2.go @@ -46,8 +46,8 @@ func ListJobDefinitionsV2ByNameWithStatus(ctx context.Context, conn *batch.Clien out = append(out, page.JobDefinitions...) 
} - if out == nil || len(out) == 0 { - return nil, tfresource.NewEmptyResultError(input) + if len(out) == 0 { + return out, tfresource.NewEmptyResultError(input) } return out, nil diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index e384b470ba7..891bd97c9c6 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -612,7 +612,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR } if data.Revision.IsNull() { - var latestRevision int32 = 0 + var latestRevision int32 for _, _jd := range jds { if aws.Int32Value(_jd.Revision) > latestRevision { latestRevision = aws.Int32Value(_jd.Revision) @@ -660,7 +660,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.EksProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { if apiObject == nil { data.EksProperties = types.ObjectNull(eksPropertiesAttr) - return + return diags } props := map[string]attr.Value{ "dns_policy": flex.StringToFramework(ctx, apiObject.PodProperties.DnsPolicy), @@ -708,8 +708,7 @@ func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.Ek return diags } -func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.EksContainer) []attr.Value { - var containers []attr.Value +func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.EksContainer) (containers []attr.Value) { for _, c := range apiObject { props := map[string]attr.Value{ "image": flex.StringToFramework(ctx, c.Image), @@ -907,10 +906,10 @@ func frameworkFlattenContainerProperties(ctx context.Context, c *batchtypes.Cont } else { containerProps["environment"] = types.ListNull(types.ObjectType{AttrTypes: keyValuePairAttr}) } - if len(c.Environment) > 0 { + if len(c.MountPoints) > 0 { var mountPoints []attr.Value for _, m := range c.MountPoints { - mountPoints = append(environment, types.ObjectValueMust(mountPointAttr, map[string]attr.Value{ + mountPoints = append(mountPoints, types.ObjectValueMust(mountPointAttr, map[string]attr.Value{ "container_path": flex.StringToFramework(ctx, m.ContainerPath), "read_only": flex.BoolToFramework(ctx, m.ReadOnly), "source_volume": flex.StringToFramework(ctx, m.SourceVolume), @@ -1008,10 +1007,10 @@ func frameworkFlattenContainerProperties(ctx context.Context, c *batchtypes.Cont "iam": flex.StringToFramework(ctx, aws.String(string(vol.EfsVolumeConfiguration.AuthorizationConfig.Iam))), }), }) - } volumes = append(volumes, types.ObjectValueMust(volumeAttr, volume)) } + containerProps["volumes"] = types.ListValueMust(types.ObjectType{AttrTypes: volumeAttr}, volumes) } else { containerProps["volumes"] = types.ListNull(types.ObjectType{AttrTypes: volumeAttr}) } @@ -1070,7 +1069,7 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStra att["evaluate_on_exit"] = types.ListType{ElemType: types.ObjectType{AttrTypes: evaluateOnExitAttr}} if jd == nil { data.RetryStrategy = types.ObjectNull(att) - return + return diags } var elems []attr.Value @@ -1081,7 +1080,12 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStra "on_reason": flex.StringToFramework(ctx, apiObject.OnReason), "on_status_reason": flex.StringToFramework(ctx, apiObject.OnStatusReason), } - elems = append(elems, types.ObjectValueMust(evaluateOnExitAttr, obj)) + elem, d := 
types.ObjectValue(evaluateOnExitAttr, obj) + diags.Append(d...) + if diags.HasError() { + return diags + } + elems = append(elems, elem) } if elems == nil { @@ -1090,9 +1094,14 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStra "evaluate_on_exit": types.ListNull(types.ObjectType{AttrTypes: evaluateOnExitAttr}), }) } else { + eval, d := types.ListValue(types.ObjectType{AttrTypes: evaluateOnExitAttr}, elems) + diags.Append(d...) + if diags.HasError() { + return diags + } data.RetryStrategy = types.ObjectValueMust(att, map[string]attr.Value{ "attempts": flex.Int32ToFramework(ctx, jd.Attempts), - "evaluate_on_exit": types.ListValueMust(types.ObjectType{AttrTypes: evaluateOnExitAttr}, elems), + "evaluate_on_exit": eval, }) } return diags From 4d38da8f2713e7531a7c63c07674143e445a95eb Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Wed, 3 Jan 2024 15:49:45 -0500 Subject: [PATCH 08/17] arn_prefix --- internal/service/batch/job_definition_data_source.go | 9 +++++++++ .../service/batch/job_definition_data_source_test.go | 1 + 2 files changed, 10 insertions(+) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 891bd97c9c6..c991e1f1a91 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -6,6 +6,7 @@ package batch import ( "context" "fmt" + "strings" "github.com/aws/aws-sdk-go-v2/service/batch" batchtypes "github.com/aws/aws-sdk-go-v2/service/batch/types" @@ -62,6 +63,10 @@ func (d *dataSourceJobDefinition) Schema(ctx context.Context, req datasource.Sch Optional: true, CustomType: fwtypes.ARNType, }, + "arn_prefix": schema.StringAttribute{ + Optional: true, + CustomType: fwtypes.ARNType, + }, "container_orchestration_type": schema.StringAttribute{ Computed: true, }, @@ -624,8 +629,11 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR // These fields don't have the same name as their api data.ARN = flex.StringToFrameworkARN(ctx, jd.JobDefinitionArn) + arnPrefix := strings.TrimSuffix(aws.StringValue(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.Int32Value(jd.Revision))) + data.ARNPrefix = flex.StringToFrameworkARN(ctx, aws.String(arnPrefix)) data.ID = flex.StringToFramework(ctx, jd.JobDefinitionArn) data.Name = flex.StringToFramework(ctx, jd.JobDefinitionName) + data.Revision = flex.Int32ToFramework(ctx, jd.Revision) data.Status = flex.StringToFramework(ctx, jd.Status) data.Type = flex.StringToFramework(ctx, jd.Type) @@ -1109,6 +1117,7 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStra type dataSourceJobDefinitionData struct { ARN fwtypes.ARN `tfsdk:"arn"` + ARNPrefix fwtypes.ARN `tfsdk:"arn_prefix"` ID types.String `tfsdk:"id"` Name types.String `tfsdk:"name"` Revision types.Int64 `tfsdk:"revision"` diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go index 5a1f91ba613..5376abf121f 100644 --- a/internal/service/batch/job_definition_data_source_test.go +++ b/internal/service/batch/job_definition_data_source_test.go @@ -83,6 +83,7 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", 
regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), + acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn_prefix", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s`, rName))), ), }, { From 2cc52d905d8cd95ed47a9e35bbd8c22718138662 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 15 Feb 2024 09:53:23 -0500 Subject: [PATCH 09/17] go generate call --- .../batch/service_endpoints_gen_test.go | 67 ++++++++++++++----- 1 file changed, 52 insertions(+), 15 deletions(-) diff --git a/internal/service/batch/service_endpoints_gen_test.go b/internal/service/batch/service_endpoints_gen_test.go index 3f32472c2d7..f4fc8850f36 100644 --- a/internal/service/batch/service_endpoints_gen_test.go +++ b/internal/service/batch/service_endpoints_gen_test.go @@ -4,15 +4,16 @@ package batch_test import ( "context" + "errors" "fmt" - "net/url" "os" "path/filepath" "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -202,33 +203,69 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }, } - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase + t.Run("v1", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase - t.Run(name, func(t *testing.T) { - testEndpointCase(t, region, testcase, callService) - }) - } + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV1) + }) + } + }) + + t.Run("v2", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV2) + }) + } + }) } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := batch_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(batch_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), batch_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI.String() +} + +func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { + t.Helper() + + var endpoint string - if url.Path == "" { - url.Path = "/" + client := meta.BatchClient(ctx) + + _, err := client.ListJobs(ctx, &batch_sdkv2.ListJobsInput{}, + func(opts *batch_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } - return url.String() + return endpoint } -func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { +func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { t.Helper() client := meta.BatchConn(ctx) From f205f9895cac68b55e2a6fb8d673bc2ede4eca42 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 15 Feb 2024 11:54:42 -0500 Subject: [PATCH 10/17] Run 'go get 
github.com/aws/aws-sdk-go-v2/service/batch@v1.32.0 && go mod tidy'. --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 91b69a2aad7..9e84e733bd2 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/apprunner v1.27.0 github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 - github.com/aws/aws-sdk-go-v2/service/batch v1.30.5 + github.com/aws/aws-sdk-go-v2/service/batch v1.32.0 github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 github.com/aws/aws-sdk-go-v2/service/budgets v1.21.0 diff --git a/go.sum b/go.sum index 90965f91d6f..c75a83a999f 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 h1:oVrFdlLcYETrVftzF0Q/Dr0tf github.com/aws/aws-sdk-go-v2/service/athena v1.39.0/go.mod h1:PPlSmhFoI4r5BGLB+6YDUHSU3E77brazZXLcj2DeQZQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 h1:wW06a5cOpVYJ1NrjmcKpk54xqUYK2PbL0ttOcXKyBrQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0/go.mod h1:KPniIDEIjjhk8v1hkQeBeUcSPS0i/MAGXt80hUr6Cbc= -github.com/aws/aws-sdk-go-v2/service/batch v1.30.5 h1:plf1gPkD4t7yFygClkfxYREpDnLu/tub6tJO6U31TKU= -github.com/aws/aws-sdk-go-v2/service/batch v1.30.5/go.mod h1:PueWUeJBztSAvgaTrbefYvj+kOhBbjE2nia473vk2L8= +github.com/aws/aws-sdk-go-v2/service/batch v1.32.0 h1:KIV3V/Edj0N7dG38u6wjq6zytO6prVjDewT88KzXlVE= +github.com/aws/aws-sdk-go-v2/service/batch v1.32.0/go.mod h1:75qh07u8lNpPtIoUlKWN5RCDga/yN6PDJyPVFLlVaMU= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 h1:Eah+mRIMPbq3KdgLpUT44nCJi7cECjy5U2fgFO0jiiQ= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0/go.mod h1:orxULvnjYi9X3Na7eGy27KD6uOE8vDvyJCNJejmU92E= github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 h1:pAaehMb08sPnGBvPnm0paurEj6EtjCEwxaw8WZN51LA= From 8923b6280b10c3c5747fee36dddb573eeb27bef9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 15 Feb 2024 12:07:18 -0500 Subject: [PATCH 11/17] Tweak CHANGELOG entry. --- .changelog/34663.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34663.txt b/.changelog/34663.txt index 7caf0fb2e24..5630e263573 100644 --- a/.changelog/34663.txt +++ b/.changelog/34663.txt @@ -1,3 +1,3 @@ ```release-note:new-data-source -batch_job_definition +aws_batch_job_definition ``` From 8b8a444f34636dd43fe2dabdde36af9ddbe36143 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 15 Feb 2024 17:06:40 -0500 Subject: [PATCH 12/17] d/aws_batch_job_definition: Add structures in preparation for AutoFlEx. 
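These model structs mirror the Batch API shapes so that the hand-written
flatteners can eventually be replaced. A rough sketch of the intended
direction, assuming AutoFlEx's generic flattener (not wired up in this
commit):

    // Hypothetical: flex.Flatten walks the API object and populates the
    // tfsdk-tagged model fields, replacing the per-field flatteners below.
    var data dataSourceJobDefinitionData
    response.Diagnostics.Append(flex.Flatten(ctx, jd, &data)...)
    if response.Diagnostics.HasError() {
        return
    }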
--- internal/service/batch/enum.go | 10 +- .../batch/job_definition_data_source.go | 367 +++++++++++++++--- internal/service/batch/service_package_gen.go | 2 +- 3 files changed, 314 insertions(+), 65 deletions(-) diff --git a/internal/service/batch/enum.go b/internal/service/batch/enum.go index c75ab45ab20..c21f1200285 100644 --- a/internal/service/batch/enum.go +++ b/internal/service/batch/enum.go @@ -4,13 +4,13 @@ package batch const ( - JobDefinitionStatusInactive string = "INACTIVE" - JobDefinitionStatusActive string = "ACTIVE" + jobDefinitionStatusActive string = "ACTIVE" + jobDefinitionStatusInactive string = "INACTIVE" ) -func JobDefinitionStatus_Values() []string { +func jobDefinitionStatus_Values() []string { return []string{ - JobDefinitionStatusInactive, - JobDefinitionStatusActive, + jobDefinitionStatusInactive, + jobDefinitionStatusActive, } } diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index c991e1f1a91..852d9286958 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/service/batch" - batchtypes "github.com/aws/aws-sdk-go-v2/service/batch/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/batch/types" "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" @@ -31,63 +31,92 @@ import ( ) // @FrameworkDataSource(name="Job Definition") -func newDataSourceJobDefinition(context.Context) (datasource.DataSourceWithConfigure, error) { - return &dataSourceJobDefinition{}, nil +func newJobDefinitionDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + return &jobDefinitionDataSource{}, nil } const ( DSNameJobDefinition = "Job Definition Data Source" ) -func (r *resourceJobQueue) ConfigValidators(_ context.Context) []resource.ConfigValidator { - return []resource.ConfigValidator{ - resourcevalidator.ExactlyOneOf( - path.MatchRoot("arn"), - path.MatchRoot("name"), - ), - } -} - -type dataSourceJobDefinition struct { +type jobDefinitionDataSource struct { framework.DataSourceWithConfigure } -func (d *dataSourceJobDefinition) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name - resp.TypeName = "aws_batch_job_definition" +func (d *jobDefinitionDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + response.TypeName = "aws_batch_job_definition" } -func (d *dataSourceJobDefinition) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (d *jobDefinitionDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "arn": schema.StringAttribute{ + names.AttrARN: schema.StringAttribute{ Optional: true, CustomType: fwtypes.ARNType, }, "arn_prefix": schema.StringAttribute{ - Optional: true, - CustomType: fwtypes.ARNType, + Computed: true, }, "container_orchestration_type": schema.StringAttribute{ Computed: true, }, - "id": framework.IDAttribute(), - "name": schema.StringAttribute{ + "eks_properties": schema.ListAttribute{ + CustomType: 
fwtypes.NewListNestedObjectTypeOf[jobDefinitionEKSPropertiesModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "pod_properties": fwtypes.NewListNestedObjectTypeOf[jobDefinitionEKSPodPropertiesModel](ctx), + }, + }, + }, + names.AttrID: framework.IDAttribute(), + names.AttrName: schema.StringAttribute{ Optional: true, }, - names.AttrTags: tftags.TagsAttributeComputedOnly(), + "node_properties": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionNodePropertiesModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "main_node": types.Int64Type, + "node_range_properties": fwtypes.NewListNestedObjectTypeOf[jobDefinitionNodeRangePropertyModel](ctx), + "num_nodes": types.Int64Type, + }, + }, + }, + "retry_strategy": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionRetryStrategyModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "attempts": types.Int64Type, + "evaluate_on_exit": fwtypes.NewListNestedObjectTypeOf[jobDefinitionEvaluateOnExitModel](ctx), + }, + }, + }, "revision": schema.Int64Attribute{ Optional: true, }, + "scheduling_priority": schema.Int64Attribute{ + Computed: true, + }, "status": schema.StringAttribute{ Optional: true, // Default: JobDefinitionStatusActive, // https://github.com/hashicorp/terraform-plugin-framework/issues/751#issuecomment-1799757575 Validators: []validator.String{ - stringvalidator.OneOf(JobDefinitionStatus_Values()...), + stringvalidator.OneOf(jobDefinitionStatus_Values()...), }, }, - "scheduling_priority": schema.Int64Attribute{ - Computed: true, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + "timeout": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionJobTimeoutModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "attempt_duration_seconds": types.Int64Type, + }, + }, }, "type": schema.StringAttribute{ Computed: true, @@ -544,33 +573,26 @@ func (d *dataSourceJobDefinition) Schema(ctx context.Context, req datasource.Sch }, }, }, - "timeout": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "attempt_duration_seconds": schema.Int64Attribute{ - Computed: true, - }, - }, - }, }, } } -func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { conn := d.Meta().BatchClient(ctx) var data dataSourceJobDefinitionData - resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - jd := batchtypes.JobDefinition{} + jd := awstypes.JobDefinition{} if !data.ARN.IsNull() { out, err := FindJobDefinitionV2ByARN(ctx, conn, aws.StringValue(flex.StringFromFramework(ctx, data.ARN))) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), err), err.Error(), ) @@ -586,7 +608,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR } if data.Status.IsNull() { - active := JobDefinitionStatusActive + active := jobDefinitionStatusActive input.Status = &active } else { input.Status = flex.StringFromFramework(ctx, data.Status) @@ -595,7 +617,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR jds, err := ListJobDefinitionsV2ByNameWithStatus(ctx, conn, input) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), err), err.Error(), ) @@ -609,7 +631,7 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR } if jd.JobDefinitionArn == nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), fmt.Errorf("job definition revision %d not found", data.Revision.ValueInt64())), fmt.Sprintf("job definition revision %d not found with name %s", data.Revision.ValueInt64(), data.Name.String()), ) @@ -647,25 +669,34 @@ func (d *dataSourceJobDefinition) Read(ctx context.Context, req datasource.ReadR data.Timeout = types.ObjectNull(timeoutAttr) } - resp.Diagnostics.Append(frameworkFlattenNodeProperties(ctx, jd.NodeProperties, &data)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(frameworkFlattenNodeProperties(ctx, jd.NodeProperties, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(frameworkFlattenEKSproperties(ctx, jd.EksProperties, &data)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(frameworkFlattenEKSproperties(ctx, jd.EksProperties, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(frameworkFlattenRetryStrategy(ctx, jd.RetryStrategy, &data)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(frameworkFlattenRetryStrategy(ctx, jd.RetryStrategy, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (r *jobDefinitionDataSource) ConfigValidators(context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.ExactlyOneOf( + path.MatchRoot(names.AttrARN), + path.MatchRoot(names.AttrName), + ), + } } -func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.EksProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { +func frameworkFlattenEKSproperties(ctx context.Context, apiObject *awstypes.EksProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { if apiObject == nil { data.EksProperties = types.ObjectNull(eksPropertiesAttr) return diags @@ -716,7 +747,7 @@ func frameworkFlattenEKSproperties(ctx context.Context, apiObject *batchtypes.Ek return diags } -func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.EksContainer) (containers []attr.Value) { +func frameworkFlattenEKSContainer(ctx context.Context, apiObject []awstypes.EksContainer) (containers []attr.Value) { for _, c := range apiObject { props := map[string]attr.Value{ "image": flex.StringToFramework(ctx, c.Image), @@ -762,7 +793,7 @@ func frameworkFlattenEKSContainer(ctx context.Context, apiObject []batchtypes.Ek return containers } -func frameworkFlattenNodeProperties(ctx context.Context, props *batchtypes.NodeProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { +func frameworkFlattenNodeProperties(ctx context.Context, props *awstypes.NodeProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { att := fwtypes.AttributeTypesMust[frameworkNodeProperties](ctx) if props == nil { data.EksProperties = types.ObjectNull(att) @@ -794,7 +825,7 @@ func frameworkFlattenNodeProperties(ctx context.Context, props *batchtypes.NodeP return } -func frameworkFlattenEKSVolume(ctx context.Context, apiObject []batchtypes.EksVolume) (volumes []attr.Value) { +func frameworkFlattenEKSVolume(ctx context.Context, apiObject []awstypes.EksVolume) (volumes []attr.Value) { for _, v := range apiObject { volume := map[string]attr.Value{ "name": flex.StringToFramework(ctx, v.Name), @@ -827,7 +858,7 @@ func frameworkFlattenEKSVolume(ctx context.Context, apiObject []batchtypes.EksVo return volumes } -func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []batchtypes.EksContainerVolumeMount) (volumeMounts []attr.Value) { +func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []awstypes.EksContainerVolumeMount) (volumeMounts []attr.Value) { for _, v := range apiObject { volumeMounts = append(volumeMounts, types.ObjectValueMust(eksContainerVolumeMountAttr, map[string]attr.Value{ "mount_path": flex.StringToFramework(ctx, v.MountPath), @@ -838,7 +869,7 @@ func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []ba return } -func frameworkFlattenEKSContainerEnv(ctx context.Context, apiObject []batchtypes.EksContainerEnvironmentVariable) (env []attr.Value) { +func frameworkFlattenEKSContainerEnv(ctx context.Context, apiObject []awstypes.EksContainerEnvironmentVariable) (env []attr.Value) { for _, v := range apiObject { env = append(env, types.ObjectValueMust(eksContainerEnvironmentVariableAttr, map[string]attr.Value{ "name": flex.StringToFramework(ctx, v.Name), @@ -848,7 +879,7 @@ func frameworkFlattenEKSContainerEnv(ctx context.Context, apiObject []batchtypes return } -func frameworkFlattenContainerProperties(ctx context.Context, c *batchtypes.ContainerProperties) map[string]attr.Value { +func frameworkFlattenContainerProperties(ctx 
context.Context, c *awstypes.ContainerProperties) map[string]attr.Value {
 	containerProps := map[string]attr.Value{
 		"command":            flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Command)),
 		"execution_role_arn": flex.StringToFramework(ctx, c.ExecutionRoleArn),
@@ -1025,7 +1056,7 @@ func frameworkFlattenContainerProperties(ctx context.Context, c *batchtypes.Cont
 	return containerProps
 }
 
-func frameworkFlattenContainerLinuxParameters(ctx context.Context, lp *batchtypes.LinuxParameters) map[string]attr.Value {
+func frameworkFlattenContainerLinuxParameters(ctx context.Context, lp *awstypes.LinuxParameters) map[string]attr.Value {
 	linuxProps := map[string]attr.Value{
 		"init_process_enabled": flex.BoolToFramework(ctx, lp.InitProcessEnabled),
 		"max_swap":             flex.Int32ToFramework(ctx, lp.MaxSwap),
@@ -1046,7 +1077,7 @@ func frameworkFlattenContainerLinuxParameters(ctx context.Context, lp *batchtype
 	return linuxProps
 }
 
-func frameworkFlattenContainerDevices(ctx context.Context, devices []batchtypes.Device) (data []attr.Value) {
+func frameworkFlattenContainerDevices(ctx context.Context, devices []awstypes.Device) (data []attr.Value) {
 	for _, dev := range devices {
 		var perms []string
 		for _, perm := range dev.Permissions {
@@ -1061,7 +1092,7 @@ func frameworkFlattenContainerDevices(ctx context.Context, devices []batchtypes.
 	return
 }
 
-func flattenContainerTmpfs(ctx context.Context, tmpfs []batchtypes.Tmpfs) (data []attr.Value) {
+func flattenContainerTmpfs(ctx context.Context, tmpfs []awstypes.Tmpfs) (data []attr.Value) {
 	for _, tmp := range tmpfs {
 		data = append(data, types.ObjectValueMust(tmpfsAttr, map[string]attr.Value{
 			"container_path": flex.StringToFramework(ctx, tmp.ContainerPath),
@@ -1072,7 +1103,7 @@ func flattenContainerTmpfs(ctx context.Context, tmpfs []batchtypes.Tmpfs) (data
 	return
 }
 
-func frameworkFlattenRetryStrategy(ctx context.Context, jd *batchtypes.RetryStrategy, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) {
+func frameworkFlattenRetryStrategy(ctx context.Context, jd *awstypes.RetryStrategy, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) {
 	att := fwtypes.AttributeTypesMust[retryStrategy](ctx)
 	att["evaluate_on_exit"] = types.ListType{ElemType: types.ObjectType{AttrTypes: evaluateOnExitAttr}}
 	if jd == nil {
@@ -1348,3 +1379,221 @@ var authorizationConfigAttr = map[string]attr.Type{
 var hostAttr = map[string]attr.Type{
 	"source_path": types.StringType,
 }
+
+type jobDefinitionEKSPropertiesModel struct {
+	PodProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPodPropertiesModel] `tfsdk:"pod_properties"`
+}
+
+type jobDefinitionEKSPodPropertiesModel struct {
+	Containers         fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerModel] `tfsdk:"containers"`
+	DNSPolicy          types.String                                                    `tfsdk:"dns_policy"`
+	HostNetwork        types.Bool                                                      `tfsdk:"host_network"`
+	Metadata           fwtypes.ListNestedObjectValueOf[jobDefinitionEKSMetadataModel]  `tfsdk:"metadata"`
+	ServiceAccountName types.String                                                    `tfsdk:"service_account_name"`
+	Volumes            fwtypes.ListNestedObjectValueOf[jobDefinitionEKSVolumeModel]    `tfsdk:"volumes"`
+}
+
+type jobDefinitionEKSContainerModel struct {
+	Args            fwtypes.ListValueOf[types.String]                                                   `tfsdk:"args"`
+	Command         fwtypes.ListValueOf[types.String]                                                   `tfsdk:"command"`
+	Env             fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerEnvironmentVariableModel]  `tfsdk:"env"`
+	Image           types.String                                                                        `tfsdk:"image"`
+	ImagePullPolicy types.String                                                                        `tfsdk:"image_pull_policy"`
+	Name            types.String                                                                        `tfsdk:"name"`
fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerResourceRequirementsModel] `tfsdk:"resources"`
+ SecurityContext fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerSecurityContextModel] `tfsdk:"security_context"`
+ VolumeMounts fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerVolumeMountModel] `tfsdk:"volume_mounts"`
+}
+
+type jobDefinitionEKSContainerEnvironmentVariableModel struct {
+ Name types.String `tfsdk:"name"`
+ Value types.String `tfsdk:"value"`
+}
+
+type jobDefinitionEKSContainerResourceRequirementsModel struct {
+ Limits fwtypes.MapValueOf[types.String] `tfsdk:"limits"`
+ Requests fwtypes.MapValueOf[types.String] `tfsdk:"requests"`
+}
+
+type jobDefinitionEKSContainerSecurityContextModel struct {
+ Privileged types.Bool `tfsdk:"privileged"`
+ ReadOnlyRootFilesystem types.Bool `tfsdk:"read_only_root_file_system"`
+ RunAsGroup types.Int64 `tfsdk:"run_as_group"`
+ RunAsNonRoot types.Bool `tfsdk:"run_as_non_root"`
+ RunAsUser types.Int64 `tfsdk:"run_as_user"`
+}
+
+type jobDefinitionEKSContainerVolumeMountModel struct {
+ MountPath types.String `tfsdk:"mount_path"`
+ Name types.String `tfsdk:"name"`
+ ReadOnly types.Bool `tfsdk:"read_only"`
+}
+
+type jobDefinitionEKSMetadataModel struct {
+ Labels fwtypes.MapValueOf[types.String] `tfsdk:"labels"`
+}
+
+type jobDefinitionEKSVolumeModel struct {
+ EmptyDir fwtypes.ListNestedObjectValueOf[jobDefinitionEKSEmptyDirModel] `tfsdk:"empty_dir"`
+ Name types.String `tfsdk:"name"`
+ HostPath fwtypes.ListNestedObjectValueOf[jobDefinitionEKSHostPathModel] `tfsdk:"host_path"`
+ Secret fwtypes.ListNestedObjectValueOf[jobDefinitionEKSSecretModel] `tfsdk:"secret"`
+}
+
+type jobDefinitionEKSEmptyDirModel struct {
+ Medium types.String `tfsdk:"medium"`
+ SizeLimit types.String `tfsdk:"size_limit"`
+}
+
+type jobDefinitionEKSHostPathModel struct {
+ Path types.String `tfsdk:"path"`
+}
+
+type jobDefinitionEKSSecretModel struct {
+ Optional types.Bool `tfsdk:"optional"`
+ SecretName types.String `tfsdk:"secret_name"`
+}
+
+type jobDefinitionNodePropertiesModel struct {
+ MainNode types.Int64 `tfsdk:"main_node"`
+ NodeRangeProperties fwtypes.ListNestedObjectValueOf[jobDefinitionNodeRangePropertyModel] `tfsdk:"node_range_properties"`
+ NumNodes types.Int64 `tfsdk:"num_nodes"`
+}
+
+type jobDefinitionNodeRangePropertyModel struct {
+ Container fwtypes.ListNestedObjectValueOf[jobDefinitionContainerPropertiesModel] `tfsdk:"container"`
+ TargetNodes types.String `tfsdk:"target_nodes"`
+}
+
+type jobDefinitionContainerPropertiesModel struct {
+ Command fwtypes.ListValueOf[types.String] `tfsdk:"command"`
+ Environment fwtypes.ListNestedObjectValueOf[jobDefinitionKeyValuePairModel] `tfsdk:"environment"`
+ EphemeralStorage fwtypes.ListNestedObjectValueOf[jobDefinitionEphemeralStorageModel] `tfsdk:"ephemeral_storage"`
+ ExecutionRoleARN types.String `tfsdk:"execution_role_arn"`
+ FargatePlatformConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionFargatePlatformConfigurationModel] `tfsdk:"fargate_platform_configuration"`
+ Image types.String `tfsdk:"image"`
+ InstanceType types.String `tfsdk:"instance_type"`
+ JobRoleARN types.String `tfsdk:"job_role_arn"`
+ LinuxParameters fwtypes.ListNestedObjectValueOf[jobDefinitionLinuxParametersModel] `tfsdk:"linux_parameters"`
+ LogConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionLogConfigurationModel] `tfsdk:"log_configuration"`
+ MountPoints fwtypes.ListNestedObjectValueOf[jobDefinitionMountPointModel] `tfsdk:"mount_points"`
+ NetworkConfiguration 
fwtypes.ListNestedObjectValueOf[jobDefinitionNetworkConfigurationModel] `tfsdk:"network_configuration"` + Privileged types.Bool `tfsdk:"privileged"` + ReadonlyRootFilesystem types.Bool `tfsdk:"readonly_root_filesystem"` + ResourceRequirements fwtypes.ListNestedObjectValueOf[jobDefinitionResourceRequirementModel] `tfsdk:"resource_requirements"` + RuntimePlatform fwtypes.ListNestedObjectValueOf[jobDefinitionRuntimePlatformModel] `tfsdk:"runtime_platform"` + Secrets fwtypes.ListNestedObjectValueOf[jobDefinitionSecretModel] `tfsdk:"secrets"` + Ulimits fwtypes.ListNestedObjectValueOf[jobDefinitionUlimitModel] `tfsdk:"ulimits"` + User types.String `tfsdk:"user"` + Volumes fwtypes.ListNestedObjectValueOf[jobDefinitionVolumeModel] `tfsdk:"volumes"` +} + +type jobDefinitionKeyValuePairModel struct { + Name types.String `tfsdk:"name"` + Value types.String `tfsdk:"value"` +} + +type jobDefinitionEphemeralStorageModel struct { + SizeInGiB types.Int64 `tfsdk:"size_in_gib"` +} + +type jobDefinitionFargatePlatformConfigurationModel struct { + PlatformVersion types.String `tfsdk:"platform_version"` +} + +type jobDefinitionLinuxParametersModel struct { + Devices fwtypes.ListNestedObjectValueOf[jobDefinitionDeviceModel] `tfsdk:"devices"` + InitProcessEnabled types.Bool `tfsdk:"init_process_enabled"` + MaxSwap types.Int64 `tfsdk:"max_swap"` + SharedMemorySize types.Int64 `tfsdk:"shared_memory_size"` + Swappiness types.Int64 `tfsdk:"swappiness"` + Tmpfs fwtypes.ListNestedObjectValueOf[jobDefinitionTmpfsModel] `tfsdk:"tmpfs"` +} + +type jobDefinitionDeviceModel struct { + ContainerPath types.String `tfsdk:"container_path"` + HostPath types.String `tfsdk:"host_path"` + Permissions fwtypes.ListValueOf[types.String] `tfsdk:"permissions"` +} + +type jobDefinitionTmpfsModel struct { + ContainerPath types.String `tfsdk:"container_path"` + MountOptions fwtypes.ListValueOf[types.String] `tfsdk:"mount_options"` + Size types.Int64 `tfsdk:"size"` +} + +type jobDefinitionLogConfigurationModel struct { + LogDriver types.String `tfsdk:"log_driver"` + Options fwtypes.MapValueOf[types.String] `tfsdk:"options"` + SecretOptions fwtypes.ListNestedObjectValueOf[jobDefinitionSecretModel] `tfsdk:"secret_options"` +} + +type jobDefinitionSecretModel struct { + Name types.String `tfsdk:"name"` + ValueFrom types.String `tfsdk:"value_from"` +} + +type jobDefinitionMountPointModel struct { + ContainerPath types.String `tfsdk:"container_path"` + ReadOnly types.Bool `tfsdk:"read_only"` + SourceVolume types.String `tfsdk:"source_volume"` +} + +type jobDefinitionNetworkConfigurationModel struct { + AssignPublicIP types.Bool `tfsdk:"assign_public_ip"` +} + +type jobDefinitionResourceRequirementModel struct { + Type types.String `tfsdk:"type"` + Value types.String `tfsdk:"value"` +} + +type jobDefinitionRuntimePlatformModel struct { + CPUArchitecture types.String `tfsdk:"cpu_architecture"` + OperatingSystemFamily types.String `tfsdk:"operating_system_family"` +} + +type jobDefinitionUlimitModel struct { + HardLimit types.Int64 `tfsdk:"hard_limit"` + Name types.String `tfsdk:"name"` + SoftLimit types.Int64 `tfsdk:"soft_limit"` +} + +type jobDefinitionVolumeModel struct { + EFSVolumeConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionEFSVolumeConfigurationModel] `tfsdk:"efs_volume_configuration"` + Host fwtypes.ListNestedObjectValueOf[jobDefinitionHostModel] `tfsdk:"host"` + Name types.String `tfsdk:"name"` +} + +type jobDefinitionEFSVolumeConfigurationModel struct { + AuthorizationConfig 
fwtypes.ListNestedObjectValueOf[jobDefinitionEFSAuthorizationConfigModel] `tfsdk:"authorization_config"` + FileSystemID types.String `tfsdk:"file_system_id"` + RootDirectory types.String `tfsdk:"root_directory"` + TransitEncryption types.String `tfsdk:"transit_encryption"` + TransitEncryptionPort types.Int64 `tfsdk:"transit_encryption_port"` +} + +type jobDefinitionEFSAuthorizationConfigModel struct { + AccessPointID types.String `tfsdk:"access_point_id"` + IAM types.String `tfsdk:"iam"` +} + +type jobDefinitionHostModel struct { + SourcePath types.String `tfsdk:"source_path"` +} + +type jobDefinitionRetryStrategyModel struct { + Attempts types.Int64 `tfsdk:"attempts"` + EvaluateOnExit fwtypes.ListNestedObjectValueOf[jobDefinitionEvaluateOnExitModel] `tfsdk:"evaluate_on_exit"` +} + +type jobDefinitionEvaluateOnExitModel struct { + Action types.String `tfsdk:"action"` + OnExitCode types.String `tfsdk:"on_exit_code"` + OnReason types.String `tfsdk:"on_reason"` + OnStatusReason types.String `tfsdk:"on_status_reason"` +} + +type jobDefinitionJobTimeoutModel struct { + AttemptDurationSeconds types.Int64 `tfsdk:"attempt_duration_seconds"` +} diff --git a/internal/service/batch/service_package_gen.go b/internal/service/batch/service_package_gen.go index 560615d2be7..caf9acc0fb9 100644 --- a/internal/service/batch/service_package_gen.go +++ b/internal/service/batch/service_package_gen.go @@ -20,7 +20,7 @@ type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { return []*types.ServicePackageFrameworkDataSource{ { - Factory: newDataSourceJobDefinition, + Factory: newJobDefinitionDataSource, Name: "Job Definition", }, } From 102ba10530cddad563ece285bfaa46631f1dc122 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 18 Feb 2024 16:37:44 -0500 Subject: [PATCH 13/17] d/aws_batch_job_definition: Another step to AutoFlEx. 
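
Model the data source with AutoFlex-compatible nested object types
(fwtypes.ListNestedObjectValueOf et al.) and make revision selection
explicit: drain the DescribeJobDefinitions paginator, then sort the
results and pick the newest revision unless the configuration names one.
Below is a minimal sketch of that newest-revision rule; the helper name
newestJobDefinition is hypothetical (Read inlines the equivalent
slices.SortFunc call), but the types and comparison mirror the diff:

    package batch

    import (
        "slices"

        "github.com/aws/aws-sdk-go-v2/aws"
        awstypes "github.com/aws/aws-sdk-go-v2/service/batch/types"
    )

    // newestJobDefinition returns the highest-revision job definition from
    // a DescribeJobDefinitions result set, or nil for an empty set.
    // Hypothetical helper for illustration only; Read inlines this logic.
    func newestJobDefinition(defs []awstypes.JobDefinition) *awstypes.JobDefinition {
        if len(defs) == 0 {
            return nil
        }
        // Sort ascending by revision; the last element is the newest.
        slices.SortFunc(defs, func(a, b awstypes.JobDefinition) int {
            return int(aws.ToInt32(a.Revision) - aws.ToInt32(b.Revision))
        })
        return &defs[len(defs)-1]
    }
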
--- .../batch/job_definition_data_source.go | 595 +++--------------- 1 file changed, 89 insertions(+), 506 deletions(-) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 852d9286958..b4e869ced64 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -6,12 +6,12 @@ package batch import ( "context" "fmt" + "slices" "strings" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/batch" awstypes "github.com/aws/aws-sdk-go-v2/service/batch/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -35,10 +36,6 @@ func newJobDefinitionDataSource(context.Context) (datasource.DataSourceWithConfi return &jobDefinitionDataSource{}, nil } -const ( - DSNameJobDefinition = "Job Definition Data Source" -) - type jobDefinitionDataSource struct { framework.DataSourceWithConfigure } @@ -102,8 +99,6 @@ func (d *jobDefinitionDataSource) Schema(ctx context.Context, request datasource }, "status": schema.StringAttribute{ Optional: true, - // Default: JobDefinitionStatusActive, - // https://github.com/hashicorp/terraform-plugin-framework/issues/751#issuecomment-1799757575 Validators: []validator.String{ stringvalidator.OneOf(jobDefinitionStatus_Values()...), }, @@ -122,505 +117,66 @@ func (d *jobDefinitionDataSource) Schema(ctx context.Context, request datasource Computed: true, }, }, - Blocks: map[string]schema.Block{ - "eks_properties": schema.SingleNestedBlock{ - Blocks: map[string]schema.Block{ - "pod_properties": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "dns_policy": schema.StringAttribute{ - Computed: true, - }, - "host_network": schema.BoolAttribute{ - Computed: true, - }, - "service_account_name": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "containers": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "args": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, - "commands": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, - "image": schema.StringAttribute{ - Computed: true, - }, - "image_pull_policy": schema.StringAttribute{ - Computed: true, - }, - "name": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "env": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - "value": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - "resources": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "limits": schema.MapAttribute{ - Computed: true, - ElementType: types.StringType, - }, - "requests": schema.MapAttribute{ - Computed: true, - 
ElementType: types.StringType, - }, - }, - }, - "security_context": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "privileged": schema.BoolAttribute{ - Computed: true, - }, - "run_as_user": schema.Int64Attribute{ - Computed: true, - }, - "run_as_group": schema.Int64Attribute{ - Computed: true, - }, - "run_as_non_root": schema.BoolAttribute{ - Computed: true, - }, - "read_only_root_filesystem": schema.BoolAttribute{ - Computed: true, - }, - }, - }, - "volume_mounts": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "mount_path": schema.StringAttribute{ - Computed: true, - }, - "name": schema.StringAttribute{ - Computed: true, - }, - "read_only": schema.BoolAttribute{ - Computed: true, - }, - }, - }, - }, - }, - }, - }, - "metadata": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "labels": schema.MapAttribute{ - Computed: true, - ElementType: types.StringType, - }, - }, - }, - "volumes": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "empty_dir": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "medium": schema.StringAttribute{ - Computed: true, - }, - "size_limit": schema.Int64Attribute{ - Computed: true, - }, - }, - }, - "host_path": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "path": schema.StringAttribute{ - Computed: true, - }, - }, - }, - "secret": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "secret_name": schema.StringAttribute{ - Computed: true, - }, - "optional": schema.BoolAttribute{ - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "node_properties": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "main_node": schema.Int64Attribute{ - Computed: true, - }, - "num_nodes": schema.Int64Attribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "node_range_properties": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "target_nodes": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "container": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "command": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, - "execution_role_arn": schema.StringAttribute{ - Computed: true, - }, - "image": schema.StringAttribute{ - Computed: true, - }, - "instance_type": schema.StringAttribute{ - Computed: true, - }, - "job_role_arn": schema.StringAttribute{ - Computed: true, - }, - "privileged": schema.BoolAttribute{ - Computed: true, - }, - "readonly_root_filesystem": schema.BoolAttribute{ - Computed: true, - }, - "user": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "environment": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - "value": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - "ephemeral_storage": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "size_in_gib": schema.Int64Attribute{ - Computed: true, - }, - }, - }, - "fargate_platform_configuration": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "platform_version": 
schema.StringAttribute{ - Computed: true, - }, - }, - }, - "linux_parameters": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "init_process_enabled": schema.BoolAttribute{ - Computed: true, - }, - "max_swap": schema.Int64Attribute{ - Computed: true, - }, - "shared_memory_size": schema.Int64Attribute{ - Computed: true, - }, - "swappiness": schema.Int64Attribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "devices": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "host_path": schema.StringAttribute{ - Computed: true, - }, - "container_path": schema.StringAttribute{ - Computed: true, - }, - "permissions": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, - }, - }, - }, - "tmpfs": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "container_path": schema.StringAttribute{ - Computed: true, - }, - "size": schema.Int64Attribute{ - Computed: true, - }, - "mount_options": schema.ListAttribute{ - Computed: true, - ElementType: types.StringType, - }, - }, - }, - }, - }, - }, - "log_configuration": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "options": schema.MapAttribute{ - Computed: true, - ElementType: types.StringType, - }, - "log_driver": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "secret_options": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - "value_from": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - }, - }, - "mount_points": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "container_path": schema.StringAttribute{ - Computed: true, - }, - "read_only": schema.BoolAttribute{ - Computed: true, - }, - "source_volume": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - "network_configuration": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "assign_public_ip": schema.StringAttribute{ - Computed: true, - }, - }, - }, - "resource_requirements": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "type": schema.StringAttribute{ - Computed: true, - }, - "value": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - "runtime_platform": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "cpu_architecture": schema.StringAttribute{ - Computed: true, - }, - "operating_system_family": schema.StringAttribute{ - Computed: true, - }, - }, - }, - "secrets": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - "value_from": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - "ulimits": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "hard_limit": schema.Int64Attribute{ - Computed: true, - }, - "name": schema.StringAttribute{ - Computed: true, - }, - "soft_limit": schema.Int64Attribute{ - Computed: true, - }, - }, - }, - }, - "volumes": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - 
"host": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "source_path": schema.StringAttribute{ - Computed: true, - }, - }, - }, - "efs_volume_configuration": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "file_system_id": schema.StringAttribute{ - Computed: true, - }, - "root_directory": schema.StringAttribute{ - Computed: true, - }, - "transit_encryption": schema.StringAttribute{ - Computed: true, - }, - "transit_encryption_port": schema.Int64Attribute{ - Computed: true, - }, - }, - Blocks: map[string]schema.Block{ - "authorization_config": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "access_point_id": schema.StringAttribute{ - Computed: true, - }, - "iam": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "retry_strategy": schema.SingleNestedBlock{ - Attributes: map[string]schema.Attribute{ - "attempts": schema.Int64Attribute{ - Optional: true, - Validators: []validator.Int64{ - int64validator.Between(1, 10), - }, - }, - }, - Blocks: map[string]schema.Block{ - "evaluate_on_exit": schema.ListNestedBlock{ - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "action": schema.StringAttribute{ - Computed: true, - }, - "on_exit_code": schema.StringAttribute{ - Computed: true, - }, - "on_reason": schema.StringAttribute{ - Computed: true, - }, - "on_status_reason": schema.StringAttribute{ - Computed: true, - }, - }, - }, - }, - }, - }, - }, } } func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { - conn := d.Meta().BatchClient(ctx) - - var data dataSourceJobDefinitionData + var data jobDefinitionDataSourceModel response.Diagnostics.Append(request.Config.Get(ctx, &data)...) 
if response.Diagnostics.HasError() { return } - jd := awstypes.JobDefinition{} + conn := d.Meta().BatchClient(ctx) + + var jd *awstypes.JobDefinition + + if !data.JobDefinitionARN.IsNull() { + arn := data.JobDefinitionARN.ValueString() + input := &batch.DescribeJobDefinitionsInput{ + JobDefinitions: []string{arn}, + } - if !data.ARN.IsNull() { - out, err := FindJobDefinitionV2ByARN(ctx, conn, aws.StringValue(flex.StringFromFramework(ctx, data.ARN))) + output, err := findJobDefinitionV2(ctx, conn, input) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s)", arn), err.Error()) + return } - // nosemgrep:ci.semgrep.aws.prefer-pointer-conversion-assignment - jd = *out - } - if !data.Name.IsNull() { + jd = output + } else if !data.JobDefinitionName.IsNull() { + name := data.JobDefinitionName.ValueString() + status := jobDefinitionStatusActive + if !data.Status.IsNull() { + status = data.Status.ValueString() + } input := &batch.DescribeJobDefinitionsInput{ - JobDefinitionName: flex.StringFromFramework(ctx, data.Name), + JobDefinitionName: aws.String(name), + Status: aws.String(status), } - if data.Status.IsNull() { - active := jobDefinitionStatusActive - input.Status = &active - } else { - input.Status = flex.StringFromFramework(ctx, data.Status) - } + output, err := findJobDefinitionsV2(ctx, conn, input) - jds, err := ListJobDefinitionsV2ByNameWithStatus(ctx, conn, input) + if len(output) == 0 { + err = tfresource.NewEmptyResultError(input) + } if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definitions (%s/%s)", name, status), err.Error()) + + return + } + + if data.Revision.IsNull() { + slices.SortFunc(output, func(a, b awstypes.JobDefinition) int { + return int(aws.ToInt32(a.Revision) - aws.ToInt32(b.Revision)) + }) + + jd = &output[len(output)-1] + } else { + } if !data.Revision.IsNull() { @@ -696,7 +252,34 @@ func (r *jobDefinitionDataSource) ConfigValidators(context.Context) []resource.C } } -func frameworkFlattenEKSproperties(ctx context.Context, apiObject *awstypes.EksProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { +func findJobDefinitionV2(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) (*awstypes.JobDefinition, error) { + output, err := findJobDefinitionsV2(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findJobDefinitionsV2(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) ([]awstypes.JobDefinition, error) { + var output []awstypes.JobDefinition + + pages := batch.NewDescribeJobDefinitionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.JobDefinitions...) 
+ } + + return output, nil +} + +func frameworkFlattenEKSproperties(ctx context.Context, apiObject *awstypes.EksProperties, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { if apiObject == nil { data.EksProperties = types.ObjectNull(eksPropertiesAttr) return diags @@ -793,7 +376,7 @@ func frameworkFlattenEKSContainer(ctx context.Context, apiObject []awstypes.EksC return containers } -func frameworkFlattenNodeProperties(ctx context.Context, props *awstypes.NodeProperties, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { +func frameworkFlattenNodeProperties(ctx context.Context, props *awstypes.NodeProperties, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { att := fwtypes.AttributeTypesMust[frameworkNodeProperties](ctx) if props == nil { data.EksProperties = types.ObjectNull(att) @@ -1103,7 +686,7 @@ func flattenContainerTmpfs(ctx context.Context, tmpfs []awstypes.Tmpfs) (data [] return } -func frameworkFlattenRetryStrategy(ctx context.Context, jd *awstypes.RetryStrategy, data *dataSourceJobDefinitionData) (diags diag.Diagnostics) { +func frameworkFlattenRetryStrategy(ctx context.Context, jd *awstypes.RetryStrategy, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { att := fwtypes.AttributeTypesMust[retryStrategy](ctx) att["evaluate_on_exit"] = types.ListType{ElemType: types.ObjectType{AttrTypes: evaluateOnExitAttr}} if jd == nil { @@ -1146,23 +729,6 @@ func frameworkFlattenRetryStrategy(ctx context.Context, jd *awstypes.RetryStrate return diags } -type dataSourceJobDefinitionData struct { - ARN fwtypes.ARN `tfsdk:"arn"` - ARNPrefix fwtypes.ARN `tfsdk:"arn_prefix"` - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Revision types.Int64 `tfsdk:"revision"` - Status types.String `tfsdk:"status"` - Tags types.Map `tfsdk:"tags"` - Type types.String `tfsdk:"type"` - ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` - SchedulingPriority types.Int64 `tfsdk:"scheduling_priority"` - RetryStrategy types.Object `tfsdk:"retry_strategy"` - Timeout types.Object `tfsdk:"timeout"` - NodeProperties types.Object `tfsdk:"node_properties"` - EksProperties types.Object `tfsdk:"eks_properties"` -} - type retryStrategy struct { Attempts types.Int64 `tfsdk:"attempts"` EvaluateOnExit types.Object `tfsdk:"evaluate_on_exit"` @@ -1380,6 +946,23 @@ var hostAttr = map[string]attr.Type{ "source_path": types.StringType, } +type jobDefinitionDataSourceModel struct { + ARNPrefix fwtypes.ARN `tfsdk:"arn_prefix"` + ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` + EKSProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPropertiesModel] `tfsdk:"eks_properties"` + ID types.String `tfsdk:"id"` + JobDefinitionARN fwtypes.ARN `tfsdk:"arn"` + JobDefinitionName types.String `tfsdk:"name"` + NodeProperties fwtypes.ListNestedObjectValueOf[jobDefinitionNodePropertiesModel] `tfsdk:"node_properties"` + RetryStrategy fwtypes.ListNestedObjectValueOf[jobDefinitionRetryStrategyModel] `tfsdk:"retry_strategy"` + Revision types.Int64 `tfsdk:"revision"` + SchedulingPriority types.Int64 `tfsdk:"scheduling_priority"` + Status types.String `tfsdk:"status"` + Tags types.Map `tfsdk:"tags"` + Timeout fwtypes.ListNestedObjectValueOf[jobDefinitionJobTimeoutModel] `tfsdk:"timeout"` + Type types.String `tfsdk:"type"` +} + type jobDefinitionEKSPropertiesModel struct { PodProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPodPropertiesModel] `tfsdk:"pod_properties"` } From a9c1061413159401b9d64acf934d083265d5ba77 Mon 
Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sun, 18 Feb 2024 16:50:04 -0500 Subject: [PATCH 14/17] d/aws_batch_job_definition: AutoFlEx. --- internal/service/batch/findv2.go | 54 -- .../batch/job_definition_data_source.go | 741 +----------------- .../batch/job_definition_data_source_test.go | 44 -- 3 files changed, 16 insertions(+), 823 deletions(-) delete mode 100644 internal/service/batch/findv2.go diff --git a/internal/service/batch/findv2.go b/internal/service/batch/findv2.go deleted file mode 100644 index 439af4f2894..00000000000 --- a/internal/service/batch/findv2.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package batch - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/batch" - "github.com/aws/aws-sdk-go-v2/service/batch/types" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindJobDefinitionV2ByARN(ctx context.Context, conn *batch.Client, arn string) (*types.JobDefinition, error) { - input := &batch.DescribeJobDefinitionsInput{ - JobDefinitions: []string{arn}, - } - - out, err := conn.DescribeJobDefinitions(ctx, input) - - if err != nil { - return nil, err - } - - if out == nil || len(out.JobDefinitions) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(out.JobDefinitions); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return &out.JobDefinitions[0], nil -} - -func ListJobDefinitionsV2ByNameWithStatus(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) ([]types.JobDefinition, error) { - var out []types.JobDefinition - - pages := batch.NewDescribeJobDefinitionsPaginator(conn, input) - for pages.HasMorePages() { - page, err := pages.NextPage(ctx) - - if err != nil { - return nil, err - } - out = append(out, page.JobDefinitions...) 
- }
-
- if len(out) == 0 {
- return out, tfresource.NewEmptyResultError(input)
- }
-
- return out, nil
-}
diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go
index b4e869ced64..fbe9b5811bd 100644
--- a/internal/service/batch/job_definition_data_source.go
+++ b/internal/service/batch/job_definition_data_source.go
@@ -7,7 +7,6 @@ import (
 "context"
 "fmt"
 "slices"
- "strings"
 
 "github.com/aws/aws-sdk-go-v2/aws"
 "github.com/aws/aws-sdk-go-v2/service/batch"
@@ -17,14 +16,12 @@ import (
 "github.com/hashicorp/terraform-plugin-framework/attr"
 "github.com/hashicorp/terraform-plugin-framework/datasource"
 "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
- "github.com/hashicorp/terraform-plugin-framework/diag"
 "github.com/hashicorp/terraform-plugin-framework/path"
 "github.com/hashicorp/terraform-plugin-framework/resource"
 "github.com/hashicorp/terraform-plugin-framework/schema/validator"
 "github.com/hashicorp/terraform-plugin-framework/types"
- "github.com/hashicorp/terraform-provider-aws/internal/create"
 "github.com/hashicorp/terraform-provider-aws/internal/framework"
- "github.com/hashicorp/terraform-provider-aws/internal/framework/flex"
+ fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex"
 fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types"
 tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 "github.com/hashicorp/terraform-provider-aws/internal/tfresource"
@@ -170,75 +167,36 @@ func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.R
 }
 
 if data.Revision.IsNull() {
+ // Sort in descending revision order.
 slices.SortFunc(output, func(a, b awstypes.JobDefinition) int {
- return int(aws.ToInt32(a.Revision) - aws.ToInt32(b.Revision))
+ return int(aws.ToInt32(b.Revision) - aws.ToInt32(a.Revision))
 })
 
- jd = &output[len(output)-1]
+ jd = &output[0]
 } else {
+ revision := int32(data.Revision.ValueInt64())
+ i := slices.IndexFunc(output, func(v awstypes.JobDefinition) bool {
+ return aws.ToInt32(v.Revision) == revision
+ })
 
- }
+ if i == -1 {
+ response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s/%s) revision (%d)", name, status, revision), tfresource.NewEmptyResultError(input).Error())
 
- if !data.Revision.IsNull() {
- for _, _jd := range jds {
- if aws.Int32Value(_jd.Revision) == int32(data.Revision.ValueInt64()) {
- jd = _jd
- }
+ return
 }
 
- if jd.JobDefinitionArn == nil {
- response.Diagnostics.AddError(
- create.ProblemStandardMessage(names.Batch, create.ErrActionReading, DSNameJobDefinition, data.Name.String(), fmt.Errorf("job definition revision %d not found", data.Revision.ValueInt64())),
- fmt.Sprintf("job definition revision %d not found with name %s", data.Revision.ValueInt64(), data.Name.String()),
- )
- }
- }
-
- if data.Revision.IsNull() {
- var latestRevision int32
- for _, _jd := range jds {
- if aws.Int32Value(_jd.Revision) > latestRevision {
- latestRevision = aws.Int32Value(_jd.Revision)
- jd = _jd
- }
- }
+ jd = &output[i]
 }
 }
 
- // These fields don't have the same name as their api
- data.ARN = flex.StringToFrameworkARN(ctx, jd.JobDefinitionArn)
- arnPrefix := strings.TrimSuffix(aws.StringValue(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.Int32Value(jd.Revision)))
- data.ARNPrefix = flex.StringToFrameworkARN(ctx, aws.String(arnPrefix))
- data.ID = flex.StringToFramework(ctx, jd.JobDefinitionArn)
- data.Name = flex.StringToFramework(ctx, jd.JobDefinitionName)
-
- data.Revision = 
flex.Int32ToFramework(ctx, jd.Revision) - data.Status = flex.StringToFramework(ctx, jd.Status) - data.Type = flex.StringToFramework(ctx, jd.Type) - data.ContainerOrchestrationType = types.StringValue(string(jd.ContainerOrchestrationType)) - data.SchedulingPriority = flex.Int32ToFramework(ctx, jd.SchedulingPriority) - if jd.Timeout != nil { - data.Timeout = types.ObjectValueMust(timeoutAttr, map[string]attr.Value{ - "attempt_duration_seconds": flex.Int32ToFramework(ctx, jd.Timeout.AttemptDurationSeconds), - }) - } else { - data.Timeout = types.ObjectNull(timeoutAttr) - } - - response.Diagnostics.Append(frameworkFlattenNodeProperties(ctx, jd.NodeProperties, &data)...) - if response.Diagnostics.HasError() { - return - } - - response.Diagnostics.Append(frameworkFlattenEKSproperties(ctx, jd.EksProperties, &data)...) + response.Diagnostics.Append(fwflex.Flatten(ctx, jd, &data)...) if response.Diagnostics.HasError() { return } - response.Diagnostics.Append(frameworkFlattenRetryStrategy(ctx, jd.RetryStrategy, &data)...) - if response.Diagnostics.HasError() { - return - } + // Tags? + // arnPrefix := strings.TrimSuffix(aws.StringValue(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.Int32Value(jd.Revision))) + // data.ARNPrefix = flex.StringToFrameworkARN(ctx, aws.String(arnPrefix)) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } @@ -279,673 +237,6 @@ func findJobDefinitionsV2(ctx context.Context, conn *batch.Client, input *batch. return output, nil } -func frameworkFlattenEKSproperties(ctx context.Context, apiObject *awstypes.EksProperties, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { - if apiObject == nil { - data.EksProperties = types.ObjectNull(eksPropertiesAttr) - return diags - } - props := map[string]attr.Value{ - "dns_policy": flex.StringToFramework(ctx, apiObject.PodProperties.DnsPolicy), - "host_network": flex.BoolToFramework(ctx, apiObject.PodProperties.HostNetwork), - "service_account_name": flex.StringToFramework(ctx, apiObject.PodProperties.ServiceAccountName), - } - - if apiObject.PodProperties.Metadata != nil { - props["metadata"] = types.ObjectValueMust(eksMetadataAttr, map[string]attr.Value{ - "labels": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(apiObject.PodProperties.Metadata.Labels)), - }) - } else { - props["metadata"] = types.ObjectNull(eksMetadataAttr) - } - - if len(apiObject.PodProperties.Containers) > 0 { - container, d := types.ListValue(types.ObjectType{AttrTypes: eksContainerAttr}, frameworkFlattenEKSContainer(ctx, apiObject.PodProperties.Containers)) - diags.Append(d...) - if diags.HasError() { - return diags - } - props["containers"] = container - } else { - props["containers"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerAttr}) - } - if len(apiObject.PodProperties.Volumes) > 0 { - volume, d := types.ListValue(types.ObjectType{AttrTypes: eksVolumeAttr}, frameworkFlattenEKSVolume(ctx, apiObject.PodProperties.Volumes)) - diags.Append(d...) - if diags.HasError() { - return diags - } - props["volumes"] = volume - } else { - props["volumes"] = types.ListNull(types.ObjectType{AttrTypes: eksVolumeAttr}) - } - - podProps, d := types.ObjectValue(eksPodPropertiesAttr, props) - diags.Append(d...) 
- if diags.HasError() { - return diags - } - data.EksProperties = types.ObjectValueMust(eksPropertiesAttr, map[string]attr.Value{ - "pod_properties": podProps, - }) - return diags -} - -func frameworkFlattenEKSContainer(ctx context.Context, apiObject []awstypes.EksContainer) (containers []attr.Value) { - for _, c := range apiObject { - props := map[string]attr.Value{ - "image": flex.StringToFramework(ctx, c.Image), - "image_pull_policy": flex.StringToFramework(ctx, c.ImagePullPolicy), - "name": flex.StringToFramework(ctx, c.Name), - "args": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Args)), - "commands": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Command)), - } - if c.SecurityContext != nil { - props["security_context"] = types.ObjectValueMust(eksContainerSecurityContextAttr, map[string]attr.Value{ - "privileged": flex.BoolToFramework(ctx, c.SecurityContext.Privileged), - "run_as_user": flex.Int64ToFramework(ctx, c.SecurityContext.RunAsUser), - "run_as_group": flex.Int64ToFramework(ctx, c.SecurityContext.RunAsGroup), - "run_as_non_root": flex.BoolToFramework(ctx, c.SecurityContext.RunAsNonRoot), - "read_only_root_filesystem": flex.BoolToFramework(ctx, c.SecurityContext.ReadOnlyRootFilesystem), - }) - } else { - props["security_context"] = types.ObjectNull(eksContainerSecurityContextAttr) - } - if len(c.VolumeMounts) > 0 { - props["volume_mounts"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}, frameworkFlattenEKSContainerVolumeMount(ctx, c.VolumeMounts)) - } else { - props["volume_mounts"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}) - } - - if len(c.Env) > 0 { - props["env"] = types.ListValueMust(types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}, frameworkFlattenEKSContainerEnv(ctx, c.Env)) - } else { - props["env"] = types.ListNull(types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}) - } - - if c.Resources != nil { - props["resources"] = types.ObjectValueMust(eksContainerResourceRequirementsAttr, map[string]attr.Value{ - "limits": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.Resources.Limits)), - "requests": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.Resources.Requests)), - }) - } else { - props["resources"] = types.ObjectNull(eksContainerResourceRequirementsAttr) - } - - containers = append(containers, types.ObjectValueMust(eksContainerAttr, props)) - } - return containers -} - -func frameworkFlattenNodeProperties(ctx context.Context, props *awstypes.NodeProperties, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { - att := fwtypes.AttributeTypesMust[frameworkNodeProperties](ctx) - if props == nil { - data.EksProperties = types.ObjectNull(att) - return - } - att["node_range_properties"] = types.ListType{ElemType: types.ObjectType{AttrTypes: nodeRangePropertiesAttr}} - if props == nil { - data.NodeProperties = types.ObjectNull(att) - return - } - - var properties []attr.Value - for _, v := range props.NodeRangeProperties { - container, d := types.ObjectValue(containerPropertiesAttr, frameworkFlattenContainerProperties(ctx, v.Container)) - diags = append(diags, d...) 
- if diags.HasError() { - return - } - properties = append(properties, types.ObjectValueMust(nodeRangePropertiesAttr, map[string]attr.Value{ - "container": container, - "target_nodes": flex.StringToFramework(ctx, v.TargetNodes), - })) - } - data.NodeProperties = types.ObjectValueMust(att, map[string]attr.Value{ - "main_node": flex.Int32ToFramework(ctx, props.MainNode), - "num_nodes": flex.Int32ToFramework(ctx, props.NumNodes), - "node_range_properties": types.ListValueMust(types.ObjectType{AttrTypes: nodeRangePropertiesAttr}, properties), - }) - return -} - -func frameworkFlattenEKSVolume(ctx context.Context, apiObject []awstypes.EksVolume) (volumes []attr.Value) { - for _, v := range apiObject { - volume := map[string]attr.Value{ - "name": flex.StringToFramework(ctx, v.Name), - } - if v.EmptyDir != nil { - volume["empty_dir"] = types.ObjectValueMust(eksVolumeEmptyDirAttr, map[string]attr.Value{ - "medium": flex.StringToFramework(ctx, v.EmptyDir.Medium), - "size_limit": flex.StringToFramework(ctx, v.EmptyDir.SizeLimit), - }) - } else { - volume["empty_dir"] = types.ObjectNull(eksVolumeEmptyDirAttr) - } - if v.HostPath != nil { - volume["host"] = types.ObjectValueMust(eksVolumeHostPathAttr, map[string]attr.Value{ - "path": flex.StringToFramework(ctx, v.HostPath.Path), - }) - } else { - volume["host"] = types.ObjectNull(eksVolumeHostPathAttr) - } - if v.Secret != nil { - volume["secret"] = types.ObjectValueMust(eksVolumeSecretAttr, map[string]attr.Value{ - "secret_name": flex.StringToFramework(ctx, v.Secret.SecretName), - "optional": flex.BoolToFramework(ctx, v.Secret.Optional), - }) - } else { - volume["secret"] = types.ObjectNull(eksVolumeSecretAttr) - } - volumes = append(volumes, types.ObjectValueMust(eksVolumeAttr, volume)) - } - return volumes -} - -func frameworkFlattenEKSContainerVolumeMount(ctx context.Context, apiObject []awstypes.EksContainerVolumeMount) (volumeMounts []attr.Value) { - for _, v := range apiObject { - volumeMounts = append(volumeMounts, types.ObjectValueMust(eksContainerVolumeMountAttr, map[string]attr.Value{ - "mount_path": flex.StringToFramework(ctx, v.MountPath), - "name": flex.StringToFramework(ctx, v.Name), - "read_only": flex.BoolToFramework(ctx, v.ReadOnly), - })) - } - return -} - -func frameworkFlattenEKSContainerEnv(ctx context.Context, apiObject []awstypes.EksContainerEnvironmentVariable) (env []attr.Value) { - for _, v := range apiObject { - env = append(env, types.ObjectValueMust(eksContainerEnvironmentVariableAttr, map[string]attr.Value{ - "name": flex.StringToFramework(ctx, v.Name), - "value": flex.StringToFramework(ctx, v.Value), - })) - } - return -} - -func frameworkFlattenContainerProperties(ctx context.Context, c *awstypes.ContainerProperties) map[string]attr.Value { - containerProps := map[string]attr.Value{ - "command": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(c.Command)), - "execution_role_arn": flex.StringToFramework(ctx, c.ExecutionRoleArn), - "image": flex.StringToFramework(ctx, c.Image), - "instance_type": flex.StringToFramework(ctx, c.InstanceType), - "job_role_arn": flex.StringToFramework(ctx, c.JobRoleArn), - "privileged": flex.BoolToFramework(ctx, c.Privileged), - "readonly_root_filesystem": flex.BoolToFramework(ctx, c.ReadonlyRootFilesystem), - "user": flex.StringToFramework(ctx, c.User), - } - - if (c.EphemeralStorage != nil) && (c.EphemeralStorage.SizeInGiB != nil) { - containerProps["ephemeral_storage"] = types.ObjectValueMust(ephemeralStorageAttr, map[string]attr.Value{ - "size_in_gib": flex.Int32ToFramework(ctx, 
c.EphemeralStorage.SizeInGiB), - }) - } else { - containerProps["ephemeral_storage"] = types.ObjectNull(ephemeralStorageAttr) - } - - if c.LinuxParameters != nil { - containerProps["linux_parameters"] = types.ObjectValueMust( - linuxParametersAttr, - frameworkFlattenContainerLinuxParameters(ctx, c.LinuxParameters), - ) - } else { - containerProps["linux_parameters"] = types.ObjectNull(linuxParametersAttr) - } - - if c.FargatePlatformConfiguration != nil { - containerProps["fargate_platform_configuration"] = types.ObjectValueMust(fargatePlatformConfigurationAttr, map[string]attr.Value{ - "platform_version": flex.StringToFramework(ctx, c.FargatePlatformConfiguration.PlatformVersion), - }) - } else { - containerProps["fargate_platform_configuration"] = types.ObjectNull(fargatePlatformConfigurationAttr) - } - - if c.NetworkConfiguration != nil { - containerProps["network_configuration"] = types.ObjectValueMust(networkConfigurationAttr, map[string]attr.Value{ - "assign_public_ip": flex.StringToFramework(ctx, aws.String(string(c.NetworkConfiguration.AssignPublicIp))), - }) - } else { - containerProps["network_configuration"] = types.ObjectNull(networkConfigurationAttr) - } - - if c.RuntimePlatform != nil { - containerProps["runtime_platform"] = types.ObjectValueMust(runtimePlatformAttr, map[string]attr.Value{ - "cpu_architecture": flex.StringToFramework(ctx, c.RuntimePlatform.CpuArchitecture), - "operating_system_family": flex.StringToFramework(ctx, c.RuntimePlatform.OperatingSystemFamily), - }) - } else { - containerProps["runtime_platform"] = types.ObjectNull(runtimePlatformAttr) - } - - var environment []attr.Value - if len(c.Environment) > 0 { - for _, env := range c.Environment { - environment = append(environment, types.ObjectValueMust(keyValuePairAttr, map[string]attr.Value{ - "name": flex.StringToFramework(ctx, env.Name), - "value": flex.StringToFramework(ctx, env.Value), - })) - } - containerProps["environment"] = types.ListValueMust(types.ObjectType{AttrTypes: keyValuePairAttr}, environment) - } else { - containerProps["environment"] = types.ListNull(types.ObjectType{AttrTypes: keyValuePairAttr}) - } - if len(c.MountPoints) > 0 { - var mountPoints []attr.Value - for _, m := range c.MountPoints { - mountPoints = append(mountPoints, types.ObjectValueMust(mountPointAttr, map[string]attr.Value{ - "container_path": flex.StringToFramework(ctx, m.ContainerPath), - "read_only": flex.BoolToFramework(ctx, m.ReadOnly), - "source_volume": flex.StringToFramework(ctx, m.SourceVolume), - })) - } - containerProps["mount_points"] = types.ListValueMust(types.ObjectType{AttrTypes: mountPointAttr}, mountPoints) - } else { - containerProps["mount_points"] = types.ListNull(types.ObjectType{AttrTypes: mountPointAttr}) - } - - var logConfigurationSecrets []attr.Value - if c.LogConfiguration != nil { - if len(c.LogConfiguration.SecretOptions) > 0 { - for _, sec := range c.LogConfiguration.SecretOptions { - logConfigurationSecrets = append(logConfigurationSecrets, types.ObjectValueMust(secretAttr, map[string]attr.Value{ - "name": flex.StringToFramework(ctx, sec.Name), - "value_from": flex.StringToFramework(ctx, sec.ValueFrom), - })) - } - containerProps["log_configuration"] = types.ObjectValueMust(logConfigurationAttr, map[string]attr.Value{ - "options": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.LogConfiguration.Options)), - "log_driver": flex.StringToFramework(ctx, aws.String(string(c.LogConfiguration.LogDriver))), - "secret_opts": types.ListValueMust(types.ObjectType{AttrTypes: secretAttr}, 
logConfigurationSecrets), - }) - } else { - containerProps["log_configuration"] = types.ObjectValueMust(logConfigurationAttr, map[string]attr.Value{ - "options": flex.FlattenFrameworkStringMap(ctx, aws.StringMap(c.LogConfiguration.Options)), - "log_driver": flex.StringToFramework(ctx, aws.String(string(c.LogConfiguration.LogDriver))), - "secret_opts": types.ListNull(types.ObjectType{AttrTypes: secretAttr}), - }) - } - } else { - containerProps["log_configuration"] = types.ObjectNull(logConfigurationAttr) - } - - var resourceRequirements []attr.Value - if len(c.ResourceRequirements) > 0 { - for _, res := range c.ResourceRequirements { - resourceRequirements = append(resourceRequirements, types.ObjectValueMust(resourceRequirementsAttr, map[string]attr.Value{ - "type": flex.StringToFramework(ctx, aws.String(string(res.Type))), - "value": flex.StringToFramework(ctx, res.Value), - })) - } - containerProps["resource_requirements"] = types.ListValueMust(types.ObjectType{AttrTypes: resourceRequirementsAttr}, resourceRequirements) - } else { - containerProps["resource_requirements"] = types.ListNull(types.ObjectType{AttrTypes: resourceRequirementsAttr}) - } - - var secrets []attr.Value - if len(c.Secrets) > 0 { - for _, sec := range c.Secrets { - secrets = append(secrets, types.ObjectValueMust(secretAttr, map[string]attr.Value{ - "name": flex.StringToFramework(ctx, sec.Name), - "value_from": flex.StringToFramework(ctx, sec.ValueFrom), - })) - } - containerProps["secrets"] = types.ListValueMust(types.ObjectType{AttrTypes: secretAttr}, secrets) - } else { - containerProps["secrets"] = types.ListNull(types.ObjectType{AttrTypes: secretAttr}) - } - - if len(c.Ulimits) > 0 { - var ulimits []attr.Value - for _, ul := range c.Ulimits { - ulimits = append(ulimits, types.ObjectValueMust(ulimitsAttr, map[string]attr.Value{ - "hard_limit": flex.Int32ToFramework(ctx, ul.HardLimit), - "name": flex.StringToFramework(ctx, ul.Name), - "soft_limit": flex.Int32ToFramework(ctx, ul.SoftLimit), - })) - } - containerProps["ulimits"] = types.ListValueMust(types.ObjectType{AttrTypes: ulimitsAttr}, ulimits) - } else { - containerProps["ulimits"] = types.ListNull(types.ObjectType{AttrTypes: ulimitsAttr}) - } - - if len(c.Volumes) > 0 { - var volumes []attr.Value - for _, vol := range c.Volumes { - volume := map[string]attr.Value{ - "name": flex.StringToFramework(ctx, vol.Name), - } - if vol.Host != nil { - volume["host"] = types.ObjectValueMust(hostAttr, map[string]attr.Value{ - "source_path": flex.StringToFramework(ctx, vol.Host.SourcePath), - }) - } - if vol.EfsVolumeConfiguration != nil { - volume["efs_volume_configuration"] = types.ObjectValueMust(efsVolumeConfigurationAttr, map[string]attr.Value{ - "file_system_id": flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.FileSystemId), - "root_directory": flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.RootDirectory), - "transit_encryption": flex.StringToFramework(ctx, aws.String(string(vol.EfsVolumeConfiguration.TransitEncryption))), - "transit_encryption_port": flex.Int32ToFramework(ctx, vol.EfsVolumeConfiguration.TransitEncryptionPort), - "authorization_config": types.ObjectValueMust(authorizationConfigAttr, map[string]attr.Value{ - "access_point_id": flex.StringToFramework(ctx, vol.EfsVolumeConfiguration.AuthorizationConfig.AccessPointId), - "iam": flex.StringToFramework(ctx, aws.String(string(vol.EfsVolumeConfiguration.AuthorizationConfig.Iam))), - }), - }) - } - volumes = append(volumes, types.ObjectValueMust(volumeAttr, volume)) - } - 
containerProps["volumes"] = types.ListValueMust(types.ObjectType{AttrTypes: volumeAttr}, volumes) - } else { - containerProps["volumes"] = types.ListNull(types.ObjectType{AttrTypes: volumeAttr}) - } - return containerProps -} - -func frameworkFlattenContainerLinuxParameters(ctx context.Context, lp *awstypes.LinuxParameters) map[string]attr.Value { - linuxProps := map[string]attr.Value{ - "init_process_enabled": flex.BoolToFramework(ctx, lp.InitProcessEnabled), - "max_swap": flex.Int32ToFramework(ctx, lp.MaxSwap), - "shared_memory_size": flex.Int32ToFramework(ctx, lp.SharedMemorySize), - "swappiness": flex.Int32ToFramework(ctx, lp.Swappiness), - } - if len(lp.Devices) > 0 { - linuxProps["devices"] = types.ListValueMust(types.ObjectType{AttrTypes: deviceAttr}, frameworkFlattenContainerDevices(ctx, lp.Devices)) - } else { - linuxProps["devices"] = types.ListNull(types.ObjectType{AttrTypes: deviceAttr}) - } - if len(lp.Tmpfs) > 0 { - linuxProps["tmpfs"] = types.ListValueMust(types.ObjectType{AttrTypes: tmpfsAttr}, flattenContainerTmpfs(ctx, lp.Tmpfs)) - } else { - linuxProps["tmpfs"] = types.ListNull(types.ObjectType{AttrTypes: tmpfsAttr}) - } - linuxProps["linux_parameters"] = types.ObjectValueMust(linuxParametersAttr, linuxProps) - return linuxProps -} - -func frameworkFlattenContainerDevices(ctx context.Context, devices []awstypes.Device) (data []attr.Value) { - for _, dev := range devices { - var perms []string - for _, perm := range dev.Permissions { - perms = append(perms, string(perm)) - } - data = append(data, types.ObjectValueMust(deviceAttr, map[string]attr.Value{ - "host_path": flex.StringToFramework(ctx, dev.HostPath), - "container_path": flex.StringToFramework(ctx, dev.ContainerPath), - "permissions": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(perms)), - })) - } - return -} - -func flattenContainerTmpfs(ctx context.Context, tmpfs []awstypes.Tmpfs) (data []attr.Value) { - for _, tmp := range tmpfs { - data = append(data, types.ObjectValueMust(tmpfsAttr, map[string]attr.Value{ - "container_path": flex.StringToFramework(ctx, tmp.ContainerPath), - "size": flex.Int32ToFramework(ctx, tmp.Size), - "mount_options": flex.FlattenFrameworkStringList(ctx, aws.StringSlice(tmp.MountOptions)), - })) - } - return -} - -func frameworkFlattenRetryStrategy(ctx context.Context, jd *awstypes.RetryStrategy, data *jobDefinitionDataSourceModel) (diags diag.Diagnostics) { - att := fwtypes.AttributeTypesMust[retryStrategy](ctx) - att["evaluate_on_exit"] = types.ListType{ElemType: types.ObjectType{AttrTypes: evaluateOnExitAttr}} - if jd == nil { - data.RetryStrategy = types.ObjectNull(att) - return diags - } - - var elems []attr.Value - for _, apiObject := range jd.EvaluateOnExit { - obj := map[string]attr.Value{ - "action": flex.StringToFramework(ctx, aws.String(string(apiObject.Action))), - "on_exit_code": flex.StringToFramework(ctx, apiObject.OnExitCode), - "on_reason": flex.StringToFramework(ctx, apiObject.OnReason), - "on_status_reason": flex.StringToFramework(ctx, apiObject.OnStatusReason), - } - elem, d := types.ObjectValue(evaluateOnExitAttr, obj) - diags.Append(d...) 
- if diags.HasError() { - return diags - } - elems = append(elems, elem) - } - - if elems == nil { - data.RetryStrategy = types.ObjectValueMust(att, map[string]attr.Value{ - "attempts": flex.Int32ToFramework(ctx, jd.Attempts), - "evaluate_on_exit": types.ListNull(types.ObjectType{AttrTypes: evaluateOnExitAttr}), - }) - } else { - eval, d := types.ListValue(types.ObjectType{AttrTypes: evaluateOnExitAttr}, elems) - diags.Append(d...) - if diags.HasError() { - return diags - } - data.RetryStrategy = types.ObjectValueMust(att, map[string]attr.Value{ - "attempts": flex.Int32ToFramework(ctx, jd.Attempts), - "evaluate_on_exit": eval, - }) - } - return diags -} - -type retryStrategy struct { - Attempts types.Int64 `tfsdk:"attempts"` - EvaluateOnExit types.Object `tfsdk:"evaluate_on_exit"` -} - -var timeoutAttr = map[string]attr.Type{ - "attempt_duration_seconds": types.Int64Type, -} - -var eksPropertiesAttr = map[string]attr.Type{ - "pod_properties": types.ObjectType{AttrTypes: eksPodPropertiesAttr}, -} - -var eksPodPropertiesAttr = map[string]attr.Type{ - "containers": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerAttr}}, - "dns_policy": types.StringType, - "host_network": types.BoolType, - "metadata": types.ObjectType{AttrTypes: eksMetadataAttr}, - "service_account_name": types.StringType, - "volumes": types.ListType{ElemType: types.ObjectType{AttrTypes: eksVolumeAttr}}, -} - -var eksContainerAttr = map[string]attr.Type{ - "args": types.ListType{ElemType: types.StringType}, - "commands": types.ListType{ElemType: types.StringType}, - "env": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerEnvironmentVariableAttr}}, - "image": types.StringType, - "image_pull_policy": types.StringType, - "name": types.StringType, - "resources": types.ObjectType{AttrTypes: eksContainerResourceRequirementsAttr}, - "security_context": types.ObjectType{AttrTypes: eksContainerSecurityContextAttr}, - "volume_mounts": types.ListType{ElemType: types.ObjectType{AttrTypes: eksContainerVolumeMountAttr}}, -} - -var eksContainerEnvironmentVariableAttr = map[string]attr.Type{ - "name": types.StringType, - "value": types.StringType, -} - -var eksContainerResourceRequirementsAttr = map[string]attr.Type{ - "limits": types.MapType{ElemType: types.StringType}, - "requests": types.MapType{ElemType: types.StringType}, -} - -var eksContainerSecurityContextAttr = map[string]attr.Type{ - "privileged": types.BoolType, - "run_as_user": types.Int64Type, - "run_as_group": types.Int64Type, - "run_as_non_root": types.BoolType, - "read_only_root_filesystem": types.BoolType, -} - -var eksContainerVolumeMountAttr = map[string]attr.Type{ - "mount_path": types.StringType, - "name": types.StringType, - "read_only": types.BoolType, -} - -var eksMetadataAttr = map[string]attr.Type{ - "labels": types.MapType{ElemType: types.StringType}, -} - -var eksVolumeAttr = map[string]attr.Type{ - "name": types.StringType, - "empty_dir": types.ObjectType{AttrTypes: eksVolumeEmptyDirAttr}, - "host_path": types.ObjectType{AttrTypes: eksVolumeHostPathAttr}, - "secret": types.ObjectType{AttrTypes: eksVolumeSecretAttr}, -} - -var eksVolumeEmptyDirAttr = map[string]attr.Type{ - "medium": types.StringType, - "size_limit": types.Int64Type, -} - -var eksVolumeHostPathAttr = map[string]attr.Type{ - "path": types.StringType, -} - -var eksVolumeSecretAttr = map[string]attr.Type{ - "secret_name": types.StringType, - "optional": types.BoolType, -} - -type frameworkNodeProperties struct { - MainNode types.Int64 `tfsdk:"main_node"` - 
NodeRangeProperties types.List `tfsdk:"node_range_properties"` - NumNodes types.Int64 `tfsdk:"num_nodes"` -} - -var evaluateOnExitAttr = map[string]attr.Type{ - "action": types.StringType, - "on_exit_code": types.StringType, - "on_reason": types.StringType, - "on_status_reason": types.StringType, -} - -var nodeRangePropertiesAttr = map[string]attr.Type{ - "container": types.ObjectType{AttrTypes: containerPropertiesAttr}, - "target_nodes": types.StringType, -} - -var containerPropertiesAttr = map[string]attr.Type{ - "command": types.ListType{ElemType: types.StringType}, - "environment": types.ListType{ElemType: types.ObjectType{AttrTypes: keyValuePairAttr}}, - "ephemeral_storage": types.ObjectType{AttrTypes: ephemeralStorageAttr}, - "execution_role_arn": types.StringType, - "fargate_platform_configuration": types.ObjectType{AttrTypes: fargatePlatformConfigurationAttr}, - "image": types.StringType, - "instance_type": types.StringType, - "job_role_arn": types.StringType, - "linux_parameters": types.ObjectType{AttrTypes: linuxParametersAttr}, - "log_configuration": types.ObjectType{AttrTypes: logConfigurationAttr}, - "mount_points": types.ListType{ElemType: types.ObjectType{AttrTypes: mountPointAttr}}, - "network_configuration": types.ObjectType{AttrTypes: networkConfigurationAttr}, - "privileged": types.BoolType, - "readonly_root_filesystem": types.BoolType, - "resource_requirements": types.ListType{ElemType: types.ObjectType{AttrTypes: resourceRequirementsAttr}}, - "runtime_platform": types.ObjectType{AttrTypes: runtimePlatformAttr}, - "secrets": types.ListType{ElemType: types.ObjectType{AttrTypes: secretAttr}}, - "ulimits": types.ListType{ElemType: types.ObjectType{AttrTypes: ulimitsAttr}}, - "user": types.StringType, - "volumes": types.ListType{ElemType: types.ObjectType{AttrTypes: volumeAttr}}, -} - -var keyValuePairAttr = map[string]attr.Type{ - "name": types.StringType, - "value": types.StringType, -} - -var ephemeralStorageAttr = map[string]attr.Type{ - "size_in_gib": types.Int64Type, -} - -var fargatePlatformConfigurationAttr = map[string]attr.Type{ - "platform_version": types.StringType, -} - -var linuxParametersAttr = map[string]attr.Type{ - "devices": types.ListType{ElemType: types.ObjectType{AttrTypes: deviceAttr}}, - "init_process_enabled": types.BoolType, - "max_swap": types.Int64Type, - "shared_memory_size": types.Int64Type, - "swappiness": types.Int64Type, - "tmpfs": types.ListType{ElemType: types.ObjectType{AttrTypes: tmpfsAttr}}, -} - -var logConfigurationAttr = map[string]attr.Type{ - "options": types.MapType{ElemType: types.StringType}, - "secret_options": types.ListType{ElemType: types.ObjectType{AttrTypes: secretAttr}}, - "log_driver": types.StringType, -} -var tmpfsAttr = map[string]attr.Type{ - "container_path": types.StringType, - "mount_options": types.ListType{ElemType: types.StringType}, - "size": types.Int64Type, -} - -var deviceAttr = map[string]attr.Type{ - "container_path": types.StringType, - "host_path": types.StringType, - "permissions": types.ListType{ElemType: types.StringType}, -} - -var mountPointAttr = map[string]attr.Type{ - "container_path": types.StringType, - "read_only": types.BoolType, - "source_volume": types.StringType, -} - -var networkConfigurationAttr = map[string]attr.Type{ - "assign_public_ip": types.StringType, -} - -var resourceRequirementsAttr = map[string]attr.Type{ - "type": types.StringType, - "value": types.StringType, -} - -var runtimePlatformAttr = map[string]attr.Type{ - "cpu_architecture": types.StringType, - 
"operating_system_family": types.StringType, -} - -var secretAttr = map[string]attr.Type{ - "name": types.StringType, - "value_from": types.StringType, -} - -var ulimitsAttr = map[string]attr.Type{ - "hard_limit": types.Int64Type, - "name": types.StringType, - "soft_limit": types.Int64Type, -} - -var volumeAttr = map[string]attr.Type{ - "efs_volume_configuration": types.ObjectType{AttrTypes: efsVolumeConfigurationAttr}, - "host": types.ObjectType{AttrTypes: hostAttr}, - "name": types.StringType, -} - -var efsVolumeConfigurationAttr = map[string]attr.Type{ - "authorization_config": types.ObjectType{AttrTypes: authorizationConfigAttr}, - "file_system_id": types.StringType, - "root_directory": types.StringType, - "transit_encryption": types.StringType, - "transit_encryption_port": types.Int64Type, -} - -var authorizationConfigAttr = map[string]attr.Type{ - "access_point_id": types.StringType, - "iam": types.StringType, -} - -var hostAttr = map[string]attr.Type{ - "source_path": types.StringType, -} - type jobDefinitionDataSourceModel struct { ARNPrefix fwtypes.ARN `tfsdk:"arn_prefix"` ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go index 5376abf121f..ad8112003a9 100644 --- a/internal/service/batch/job_definition_data_source_test.go +++ b/internal/service/batch/job_definition_data_source_test.go @@ -4,25 +4,18 @@ package batch_test import ( - "context" "fmt" "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/service/batch/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" - tfbatch "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { ctx := acctest.Context(t) - - var jd types.JobDefinition rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_batch_job_definition.test" resourceName := "aws_batch_job_definition.test" @@ -40,7 +33,6 @@ func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicName(rName, "1"), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), @@ -51,7 +43,6 @@ func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { // specify revision Config: testAccJobDefinitionDataSourceConfig_basicNameRevision(rName, "2", 2), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), ), }, @@ -61,8 +52,6 @@ func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { ctx := acctest.Context(t) - - var jd types.JobDefinition rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_batch_job_definition.test" @@ -79,7 
+68,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "1"), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), @@ -89,7 +77,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "2"), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), ), }, @@ -99,8 +86,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { ctx := acctest.Context(t) - - var jd types.JobDefinition rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_batch_job_definition.test" @@ -117,7 +102,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicARNNode(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttr(dataSourceName, "node_properties.main_node", "0"), resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.#", "2"), resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.0.container.image", "busybox"), @@ -129,8 +113,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { ctx := acctest.Context(t) - - var jd types.JobDefinition rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_batch_job_definition.test" @@ -147,7 +129,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicARNEKS(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckJobDefinitionV2Exists(ctx, dataSourceName, &jd), resource.TestCheckResourceAttr(dataSourceName, "type", "container"), resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.0.image", "public.ecr.aws/amazonlinux/amazonlinux:1"), @@ -157,31 +138,6 @@ func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { }) } -func testAccCheckJobDefinitionV2Exists(ctx context.Context, n string, jd *types.JobDefinition) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Batch Job Queue ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).BatchClient(ctx) - - jobDefinition, err := tfbatch.FindJobDefinitionV2ByARN(ctx, conn, rs.Primary.ID) - - if err != nil { - return err - } - - *jd = *jobDefinition - - return nil - } -} - func testAccJobDefinitionDataSourceConfig_basicARN(rName string, increment string) string { return acctest.ConfigCompose( 
testAccJobDefinitionDataSourceConfig_container(rName, increment), From 1c301baa5cff2c2927ca54eaf65b7179840986c6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 19 Feb 2024 14:42:26 -0500 Subject: [PATCH 15/17] d/aws_batch_job_definition: Set 'arn_prefix' and 'tags'. --- .../batch/job_definition_data_source.go | 9 +++++---- .../batch/job_definition_data_source_test.go | 18 +++++------------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index fbe9b5811bd..921d51a5357 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "slices" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/batch" @@ -194,9 +195,9 @@ func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.R return } - // Tags? - // arnPrefix := strings.TrimSuffix(aws.StringValue(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.Int32Value(jd.Revision))) - // data.ARNPrefix = flex.StringToFrameworkARN(ctx, aws.String(arnPrefix)) + arnPrefix := strings.TrimSuffix(aws.ToString(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.ToInt32(jd.Revision))) + data.ARNPrefix = types.StringValue(arnPrefix) + data.Tags = fwflex.FlattenFrameworkStringValueMap(ctx, jd.Tags) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } @@ -238,7 +239,7 @@ func findJobDefinitionsV2(ctx context.Context, conn *batch.Client, input *batch. } type jobDefinitionDataSourceModel struct { - ARNPrefix fwtypes.ARN `tfsdk:"arn_prefix"` + ARNPrefix types.String `tfsdk:"arn_prefix"` ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` EKSProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPropertiesModel] `tfsdk:"eks_properties"` ID types.String `tfsdk:"id"` diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go index ad8112003a9..595eb631e3d 100644 --- a/internal/service/batch/job_definition_data_source_test.go +++ b/internal/service/batch/job_definition_data_source_test.go @@ -36,7 +36,6 @@ func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), - acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), ), }, { @@ -144,8 +143,6 @@ func testAccJobDefinitionDataSourceConfig_basicARN(rName string, increment strin ` data "aws_batch_job_definition" "test" { arn = aws_batch_job_definition.test.arn - - depends_on = [aws_batch_job_definition.test] } `) } @@ -169,8 +166,6 @@ func testAccJobDefinitionDataSourceConfig_basicNameRevision(rName string, increm data "aws_batch_job_definition" "test" { name = %[1]q revision = %[2]d - - depends_on = [aws_batch_job_definition.test] } `, rName, revision)) } @@ -197,18 +192,15 @@ func testAccJobDefinitionDataSourceConfig_basicARNNode(rName string) string { return acctest.ConfigCompose( testAccJobDefinitionConfig_NodeProperties(rName), ` data "aws_batch_job_definition" "test" { - arn = aws_batch_job_definition.test.arn - depends_on = [aws_batch_job_definition.test] -}`, - ) + arn = aws_batch_job_definition.test.arn +}`) } func 
testAccJobDefinitionDataSourceConfig_basicARNEKS(rName string) string { return acctest.ConfigCompose( testAccJobDefinitionConfig_EKSProperties_basic(rName), ` data "aws_batch_job_definition" "test" { - arn = aws_batch_job_definition.test.arn - depends_on = [aws_batch_job_definition.test] -}`, - ) + arn = aws_batch_job_definition.test.arn +} +`) } From 2505938303942daeaf8c97949392e46ecd00c577 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 19 Feb 2024 15:09:44 -0500 Subject: [PATCH 16/17] d/aws_batch_job_definition: Get acceptance tests passing. --- .../batch/job_definition_data_source.go | 6 ++--- .../batch/job_definition_data_source_test.go | 23 ++++++++----------- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 921d51a5357..56601f24aee 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -181,7 +181,7 @@ func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.R }) if i == -1 { - response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s/%s) reviosn (%d)", name, status, revision), tfresource.NewEmptyResultError(input).Error()) + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s/%s) revision (%d)", name, status, revision), tfresource.NewEmptyResultError(input).Error()) return } @@ -287,7 +287,7 @@ type jobDefinitionEKSContainerEnvironmentVariableModel struct { type jobDefinitionEKSContainerResourceRequirementsModel struct { Limits fwtypes.MapValueOf[types.String] `tfsdk:"limits"` - Requests fwtypes.MapValueOf[types.String] `tfsdk:"limits"` + Requests fwtypes.MapValueOf[types.String] `tfsdk:"requests"` } type jobDefinitionEKSContainerSecurityContextModel struct { @@ -310,8 +310,8 @@ type jobDefinitionEKSMetadataModel struct { type jobDefinitionEKSVolumeModel struct { EmptyDir fwtypes.ListNestedObjectValueOf[jobDefinitionEKSEmptyDirModel] `tfsdk:"empty_dir"` - Name types.String `tfsdk:"name"` HostPath fwtypes.ListNestedObjectValueOf[jobDefinitionEKSHostPathModel] `tfsdk:"host_path"` + Name types.String `tfsdk:"name"` Secret fwtypes.ListNestedObjectValueOf[jobDefinitionEKSSecretModel] `tfsdk:"secret"` } diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go index 595eb631e3d..ed47a11162c 100644 --- a/internal/service/batch/job_definition_data_source_test.go +++ b/internal/service/batch/job_definition_data_source_test.go @@ -7,7 +7,6 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -28,18 +27,16 @@ func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccJobDefinitionDataSourceConfig_basicName(rName, "1"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.0.attempts", "10"), resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), - 
resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), ), }, { - // specify revision Config: testAccJobDefinitionDataSourceConfig_basicNameRevision(rName, "2", 2), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), @@ -62,15 +59,12 @@ func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "1"), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.0.attempts", "10"), resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), - resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.attempts", "10"), - acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s:\d+`, rName))), - acctest.MatchResourceAttrRegionalARN(dataSourceName, "arn_prefix", "batch", regexache.MustCompile(fmt.Sprintf(`job-definition/%s`, rName))), ), }, { @@ -96,14 +90,13 @@ func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccJobDefinitionDataSourceConfig_basicARNNode(rName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(dataSourceName, "node_properties.main_node", "0"), - resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.#", "2"), - resource.TestCheckResourceAttr(dataSourceName, "node_properties.node_range_properties.0.container.image", "busybox"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.main_node", "0"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.node_range_properties.#", "2"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.node_range_properties.0.container.0.image", "busybox"), ), }, }, @@ -128,9 +121,9 @@ func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { { Config: testAccJobDefinitionDataSourceConfig_basicARNEKS(rName), Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.0.pod_properties.0.containers.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.0.pod_properties.0.containers.0.image", "public.ecr.aws/amazonlinux/amazonlinux:1"), resource.TestCheckResourceAttr(dataSourceName, "type", "container"), - resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.#", "1"), - resource.TestCheckResourceAttr(dataSourceName, "eks_properties.pod_properties.containers.0.image", "public.ecr.aws/amazonlinux/amazonlinux:1"), ), }, }, @@ -166,6 +159,8 @@ func testAccJobDefinitionDataSourceConfig_basicNameRevision(rName string, increm data "aws_batch_job_definition" "test" { name = %[1]q revision = %[2]d + + depends_on = [aws_batch_job_definition.test] } `, rName, revision)) } From 4a89becdea4f21ee3985ef3e61499becb741e3da Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 19 Feb 2024 15:46:13 -0500 Subject: [PATCH 17/17] Fix golangci-lint 'revive/receiver-naming'. 
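
revive's receiver-naming check requires every method on a type to use the
same receiver name. Read() already binds *jobDefinitionDataSource to "d",
so ConfigValidators() is renamed from "r" to match. A minimal sketch of
what the linter enforces, using a hypothetical widget type rather than the
provider's code:

    package example

    type widget struct{ n int }

    // Consistent: every method on widget names its receiver "w".
    // Naming it "w" on one method and "x" on another is exactly what
    // revive/receiver-naming reports.
    func (w *widget) Get() int  { return w.n }
    func (w *widget) Set(v int) { w.n = v }
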
--- internal/service/batch/job_definition_data_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go index 921d51a5357..a42e11e209f 100644 --- a/internal/service/batch/job_definition_data_source.go +++ b/internal/service/batch/job_definition_data_source.go @@ -202,7 +202,7 @@ func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.R response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *jobDefinitionDataSource) ConfigValidators(context.Context) []resource.ConfigValidator { +func (d *jobDefinitionDataSource) ConfigValidators(context.Context) []resource.ConfigValidator { return []resource.ConfigValidator{ resourcevalidator.ExactlyOneOf( path.MatchRoot(names.AttrARN),
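
The ConfigValidators hunk above enforces that a configuration sets exactly
one of the data source's lookup arguments. A minimal self-contained sketch
of the pattern, under stated assumptions: the exampleDataSource type and the
"arn"/"name" attribute names are illustrative only, and it uses the
framework's datasourcevalidator package rather than the provider's exact
wiring:

    package example

    import (
        "context"

        "github.com/hashicorp/terraform-plugin-framework-validators/datasourcevalidator"
        "github.com/hashicorp/terraform-plugin-framework/datasource"
        "github.com/hashicorp/terraform-plugin-framework/path"
    )

    type exampleDataSource struct{}

    // ConfigValidators rejects a configuration unless exactly one of
    // "arn" or "name" is set, mirroring the two lookup modes exercised
    // by the acceptance tests above.
    func (d *exampleDataSource) ConfigValidators(context.Context) []datasource.ConfigValidator {
        return []datasource.ConfigValidator{
            datasourcevalidator.ExactlyOneOf(
                path.MatchRoot("arn"),
                path.MatchRoot("name"),
            ),
        }
    }
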