diff --git a/.changelog/34663.txt b/.changelog/34663.txt new file mode 100644 index 000000000000..5630e2635731 --- /dev/null +++ b/.changelog/34663.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_batch_job_definition +``` diff --git a/go.mod b/go.mod index 79387e35b7e9..5a98c1c3c377 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/apprunner v1.27.0 github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 + github.com/aws/aws-sdk-go-v2/service/batch v1.32.0 github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 github.com/aws/aws-sdk-go-v2/service/budgets v1.21.0 diff --git a/go.sum b/go.sum index 7d7a4a6ec798..cfdcdc001f2e 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,8 @@ github.com/aws/aws-sdk-go-v2/service/athena v1.39.0 h1:oVrFdlLcYETrVftzF0Q/Dr0tf github.com/aws/aws-sdk-go-v2/service/athena v1.39.0/go.mod h1:PPlSmhFoI4r5BGLB+6YDUHSU3E77brazZXLcj2DeQZQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0 h1:wW06a5cOpVYJ1NrjmcKpk54xqUYK2PbL0ttOcXKyBrQ= github.com/aws/aws-sdk-go-v2/service/auditmanager v1.31.0/go.mod h1:KPniIDEIjjhk8v1hkQeBeUcSPS0i/MAGXt80hUr6Cbc= +github.com/aws/aws-sdk-go-v2/service/batch v1.32.0 h1:KIV3V/Edj0N7dG38u6wjq6zytO6prVjDewT88KzXlVE= +github.com/aws/aws-sdk-go-v2/service/batch v1.32.0/go.mod h1:75qh07u8lNpPtIoUlKWN5RCDga/yN6PDJyPVFLlVaMU= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0 h1:Eah+mRIMPbq3KdgLpUT44nCJi7cECjy5U2fgFO0jiiQ= github.com/aws/aws-sdk-go-v2/service/bedrock v1.6.0/go.mod h1:orxULvnjYi9X3Na7eGy27KD6uOE8vDvyJCNJejmU92E= github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.3.0 h1:pAaehMb08sPnGBvPnm0paurEj6EtjCEwxaw8WZN51LA= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 837245166bfc..6f9126b90b97 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -14,6 +14,7 @@ import ( apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" bedrockagent_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrockagent" budgets_sdkv2 "github.com/aws/aws-sdk-go-v2/service/budgets" @@ -349,6 +350,10 @@ func (c *AWSClient) BatchConn(ctx context.Context) *batch_sdkv1.Batch { return errs.Must(conn[*batch_sdkv1.Batch](ctx, c, names.Batch, make(map[string]any))) } +func (c *AWSClient) BatchClient(ctx context.Context) *batch_sdkv2.Client { + return errs.Must(client[*batch_sdkv2.Client](ctx, c, names.Batch, make(map[string]any))) +} + func (c *AWSClient) BedrockClient(ctx context.Context) *bedrock_sdkv2.Client { return errs.Must(client[*bedrock_sdkv2.Client](ctx, c, names.Bedrock, make(map[string]any))) } diff --git a/internal/service/batch/enum.go b/internal/service/batch/enum.go new file mode 100644 index 000000000000..c21f12002856 --- /dev/null +++ b/internal/service/batch/enum.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package batch + +const ( + jobDefinitionStatusActive string = "ACTIVE" + jobDefinitionStatusInactive string = "INACTIVE" +) + +func jobDefinitionStatus_Values() []string { + return []string{ + jobDefinitionStatusInactive, + jobDefinitionStatusActive, + } +} diff --git a/internal/service/batch/job_definition_data_source.go b/internal/service/batch/job_definition_data_source.go new file mode 100644 index 000000000000..433cac66056d --- /dev/null +++ b/internal/service/batch/job_definition_data_source.go @@ -0,0 +1,474 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package batch + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/batch" + awstypes "github.com/aws/aws-sdk-go-v2/service/batch/types" + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Job Definition") +func newJobDefinitionDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + return &jobDefinitionDataSource{}, nil +} + +type jobDefinitionDataSource struct { + framework.DataSourceWithConfigure +} + +func (d *jobDefinitionDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + response.TypeName = "aws_batch_job_definition" +} + +func (d *jobDefinitionDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: schema.StringAttribute{ + Optional: true, + CustomType: fwtypes.ARNType, + }, + "arn_prefix": schema.StringAttribute{ + Computed: true, + }, + "container_orchestration_type": schema.StringAttribute{ + Computed: true, + }, + "eks_properties": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionEKSPropertiesModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "pod_properties": fwtypes.NewListNestedObjectTypeOf[jobDefinitionEKSPodPropertiesModel](ctx), + }, + }, + }, + names.AttrID: framework.IDAttribute(), + names.AttrName: schema.StringAttribute{ + Optional: true, + }, + "node_properties": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionNodePropertiesModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "main_node": types.Int64Type, + "node_range_properties": 
fwtypes.NewListNestedObjectTypeOf[jobDefinitionNodeRangePropertyModel](ctx), + "num_nodes": types.Int64Type, + }, + }, + }, + "retry_strategy": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionRetryStrategyModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "attempts": types.Int64Type, + "evaluate_on_exit": fwtypes.NewListNestedObjectTypeOf[jobDefinitionEvaluateOnExitModel](ctx), + }, + }, + }, + "revision": schema.Int64Attribute{ + Optional: true, + }, + "scheduling_priority": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf(jobDefinitionStatus_Values()...), + }, + }, + names.AttrTags: tftags.TagsAttributeComputedOnly(), + "timeout": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[jobDefinitionJobTimeoutModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "attempt_duration_seconds": types.Int64Type, + }, + }, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (d *jobDefinitionDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var data jobDefinitionDataSourceModel + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := d.Meta().BatchClient(ctx) + + var jd *awstypes.JobDefinition + + if !data.JobDefinitionARN.IsNull() { + arn := data.JobDefinitionARN.ValueString() + input := &batch.DescribeJobDefinitionsInput{ + JobDefinitions: []string{arn}, + } + + output, err := findJobDefinitionV2(ctx, conn, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s)", arn), err.Error()) + + return + } + + jd = output + } else if !data.JobDefinitionName.IsNull() { + name := data.JobDefinitionName.ValueString() + status := jobDefinitionStatusActive + if !data.Status.IsNull() { + status = data.Status.ValueString() + } + input := &batch.DescribeJobDefinitionsInput{ + JobDefinitionName: aws.String(name), + Status: aws.String(status), + } + + output, err := findJobDefinitionsV2(ctx, conn, input) + + if len(output) == 0 { + err = tfresource.NewEmptyResultError(input) + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definitions (%s/%s)", name, status), err.Error()) + + return + } + + if data.Revision.IsNull() { + // Sort in descending revision order. + slices.SortFunc(output, func(a, b awstypes.JobDefinition) int { + return int(aws.ToInt32(b.Revision) - aws.ToInt32(a.Revision)) + }) + + jd = &output[0] + } else { + revision := int32(data.Revision.ValueInt64()) + i := slices.IndexFunc(output, func(v awstypes.JobDefinition) bool { + return aws.ToInt32(v.Revision) == revision + }) + + if i == -1 { + response.Diagnostics.AddError(fmt.Sprintf("reading Batch Job Definition (%s/%s) revision (%d)", name, status, revision), tfresource.NewEmptyResultError(input).Error()) + + return + } + + jd = &output[i] + } + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, jd, &data)...) + if response.Diagnostics.HasError() { + return + } + + arnPrefix := strings.TrimSuffix(aws.ToString(jd.JobDefinitionArn), fmt.Sprintf(":%d", aws.ToInt32(jd.Revision))) + data.ARNPrefix = types.StringValue(arnPrefix) + data.Tags = fwflex.FlattenFrameworkStringValueMap(ctx, jd.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (d *jobDefinitionDataSource) ConfigValidators(context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.ExactlyOneOf( + path.MatchRoot(names.AttrARN), + path.MatchRoot(names.AttrName), + ), + } +} + +func findJobDefinitionV2(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) (*awstypes.JobDefinition, error) { + output, err := findJobDefinitionsV2(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findJobDefinitionsV2(ctx context.Context, conn *batch.Client, input *batch.DescribeJobDefinitionsInput) ([]awstypes.JobDefinition, error) { + var output []awstypes.JobDefinition + + pages := batch.NewDescribeJobDefinitionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.JobDefinitions...) + } + + return output, nil +} + +type jobDefinitionDataSourceModel struct { + ARNPrefix types.String `tfsdk:"arn_prefix"` + ContainerOrchestrationType types.String `tfsdk:"container_orchestration_type"` + EKSProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPropertiesModel] `tfsdk:"eks_properties"` + ID types.String `tfsdk:"id"` + JobDefinitionARN fwtypes.ARN `tfsdk:"arn"` + JobDefinitionName types.String `tfsdk:"name"` + NodeProperties fwtypes.ListNestedObjectValueOf[jobDefinitionNodePropertiesModel] `tfsdk:"node_properties"` + RetryStrategy fwtypes.ListNestedObjectValueOf[jobDefinitionRetryStrategyModel] `tfsdk:"retry_strategy"` + Revision types.Int64 `tfsdk:"revision"` + SchedulingPriority types.Int64 `tfsdk:"scheduling_priority"` + Status types.String `tfsdk:"status"` + Tags types.Map `tfsdk:"tags"` + Timeout fwtypes.ListNestedObjectValueOf[jobDefinitionJobTimeoutModel] `tfsdk:"timeout"` + Type types.String `tfsdk:"type"` +} + +type jobDefinitionEKSPropertiesModel struct { + PodProperties fwtypes.ListNestedObjectValueOf[jobDefinitionEKSPodPropertiesModel] `tfsdk:"pod_properties"` +} + +type jobDefinitionEKSPodPropertiesModel struct { + Containers fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerModel] `tfsdk:"containers"` + DNSPolicy types.String `tfsdk:"dns_policy"` + HostNetwork types.Bool `tfsdk:"host_network"` + Metadata fwtypes.ListNestedObjectValueOf[jobDefinitionEKSMetadataModel] `tfsdk:"metadata"` + ServiceAccountName types.Bool `tfsdk:"service_account_name"` + Volumes fwtypes.ListNestedObjectValueOf[jobDefinitionEKSVolumeModel] `tfsdk:"volumes"` +} + +type jobDefinitionEKSContainerModel struct { + Args fwtypes.ListValueOf[types.String] `tfsdk:"args"` + Command fwtypes.ListValueOf[types.String] `tfsdk:"command"` + Env fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerEnvironmentVariableModel] `tfsdk:"env"` + Image types.String `tfsdk:"image"` + ImagePullPolicy types.String `tfsdk:"image_pull_policy"` + Name types.String `tfsdk:"name"` + Resources fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerResourceRequirementsModel] `tfsdk:"resources"` + SecurityContext fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerSecurityContextModel] `tfsdk:"security_context"` + VolumeMounts fwtypes.ListNestedObjectValueOf[jobDefinitionEKSContainerVolumeMountModel] `tfsdk:"volume_mounts"` +} + +type jobDefinitionEKSContainerEnvironmentVariableModel struct { + Name types.String `tfsdk:"name"` + Value types.String `tfsdk:"value"` +} + +type jobDefinitionEKSContainerResourceRequirementsModel struct { + Limits 
fwtypes.MapValueOf[types.String] `tfsdk:"limits"` + Requests fwtypes.MapValueOf[types.String] `tfsdk:"requests"` +} + +type jobDefinitionEKSContainerSecurityContextModel struct { + Privileged types.Bool `tfsdk:"privileged"` + ReadOnlyRootFilesystem types.Bool `tfsdk:"read_only_root_file_system"` + RunAsGroup types.Int64 `tfsdk:"run_as_group"` + RunAsNonRoot types.Bool `tfsdk:"run_as_non_root"` + RunAsUser types.Int64 `tfsdk:"run_as_user"` +} + +type jobDefinitionEKSContainerVolumeMountModel struct { + MountPath types.String `tfsdk:"mount_path"` + Name types.String `tfsdk:"name"` + ReadOnly types.Bool `tfsdk:"read_only"` +} + +type jobDefinitionEKSMetadataModel struct { + Labels fwtypes.MapValueOf[types.String] `tfsdk:"labels"` +} + +type jobDefinitionEKSVolumeModel struct { + EmptyDir fwtypes.ListNestedObjectValueOf[jobDefinitionEKSEmptyDirModel] `tfsdk:"empty_dir"` + HostPath fwtypes.ListNestedObjectValueOf[jobDefinitionEKSHostPathModel] `tfsdk:"host_path"` + Name types.String `tfsdk:"name"` + Secret fwtypes.ListNestedObjectValueOf[jobDefinitionEKSSecretModel] `tfsdk:"secret"` +} + +type jobDefinitionEKSEmptyDirModel struct { + Medium types.String `tfsdk:"medium"` + SizeLimit types.String `tfsdk:"size_limit"` +} + +type jobDefinitionEKSHostPathModel struct { + Path types.String `tfsdk:"path"` +} + +type jobDefinitionEKSSecretModel struct { + Optional types.Bool `tfsdk:"optional"` + SecretName types.String `tfsdk:"secret_name"` +} + +type jobDefinitionNodePropertiesModel struct { + MainNode types.Int64 `tfsdk:"main_node"` + NodeRangeProperties fwtypes.ListNestedObjectValueOf[jobDefinitionNodeRangePropertyModel] `tfsdk:"node_range_properties"` + NumNodes types.Int64 `tfsdk:"num_nodes"` +} + +type jobDefinitionNodeRangePropertyModel struct { + Container fwtypes.ListNestedObjectValueOf[jobDefinitionContainerPropertiesModel] `tfsdk:"container"` + TargetNodes types.String `tfsdk:"target_nodes"` +} + +type jobDefinitionContainerPropertiesModel struct { + Command fwtypes.ListValueOf[types.String] `tfsdk:"command"` + Environment fwtypes.ListNestedObjectValueOf[jobDefinitionKeyValuePairModel] `tfsdk:"environment"` + EphemeralStorage fwtypes.ListNestedObjectValueOf[jobDefinitionEphemeralStorageModel] `tfsdk:"ephemeral_storage"` + ExecutionRoleARN types.String `tfsdk:"execution_role_arn"` + FargatePlatformConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionFargatePlatformConfigurationModel] `tfsdk:"fargate_platform_configuration"` + Image types.String `tfsdk:"image"` + InstanceType types.String `tfsdk:"instance_type"` + JobRoleARN types.String `tfsdk:"job_role_arn"` + LinuxParameters fwtypes.ListNestedObjectValueOf[jobDefinitionLinuxParametersModel] `tfsdk:"linux_parameters"` + LogConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionLogConfigurationModel] `tfsdk:"log_configuration"` + MountPoints fwtypes.ListNestedObjectValueOf[jobDefinitionMountPointModel] `tfsdk:"mount_points"` + NetworkConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionNetworkConfigurationModel] `tfsdk:"network_configuration"` + Privileged types.Bool `tfsdk:"privileged"` + ReadonlyRootFilesystem types.Bool `tfsdk:"readonly_root_filesystem"` + ResourceRequirements fwtypes.ListNestedObjectValueOf[jobDefinitionResourceRequirementModel] `tfsdk:"resource_requirements"` + RuntimePlatform fwtypes.ListNestedObjectValueOf[jobDefinitionRuntimePlatformModel] `tfsdk:"runtime_platform"` + Secrets fwtypes.ListNestedObjectValueOf[jobDefinitionSecretModel] `tfsdk:"secrets"` + Ulimits 
fwtypes.ListNestedObjectValueOf[jobDefinitionUlimitModel] `tfsdk:"ulimits"` + User types.String `tfsdk:"user"` + Volumes fwtypes.ListNestedObjectValueOf[jobDefinitionVolumeModel] `tfsdk:"volumes"` +} + +type jobDefinitionKeyValuePairModel struct { + Name types.String `tfsdk:"name"` + Value types.String `tfsdk:"value"` +} + +type jobDefinitionEphemeralStorageModel struct { + SizeInGiB types.Int64 `tfsdk:"size_in_gib"` +} + +type jobDefinitionFargatePlatformConfigurationModel struct { + PlatformVersion types.String `tfsdk:"platform_version"` +} + +type jobDefinitionLinuxParametersModel struct { + Devices fwtypes.ListNestedObjectValueOf[jobDefinitionDeviceModel] `tfsdk:"devices"` + InitProcessEnabled types.Bool `tfsdk:"init_process_enabled"` + MaxSwap types.Int64 `tfsdk:"max_swap"` + SharedMemorySize types.Int64 `tfsdk:"shared_memory_size"` + Swappiness types.Int64 `tfsdk:"swappiness"` + Tmpfs fwtypes.ListNestedObjectValueOf[jobDefinitionTmpfsModel] `tfsdk:"tmpfs"` +} + +type jobDefinitionDeviceModel struct { + ContainerPath types.String `tfsdk:"container_path"` + HostPath types.String `tfsdk:"host_path"` + Permissions fwtypes.ListValueOf[types.String] `tfsdk:"permissions"` +} + +type jobDefinitionTmpfsModel struct { + ContainerPath types.String `tfsdk:"container_path"` + MountOptions fwtypes.ListValueOf[types.String] `tfsdk:"mount_options"` + Size types.Int64 `tfsdk:"size"` +} + +type jobDefinitionLogConfigurationModel struct { + LogDriver types.String `tfsdk:"log_driver"` + Options fwtypes.MapValueOf[types.String] `tfsdk:"options"` + SecretOptions fwtypes.ListNestedObjectValueOf[jobDefinitionSecretModel] `tfsdk:"secret_options"` +} + +type jobDefinitionSecretModel struct { + Name types.String `tfsdk:"name"` + ValueFrom types.String `tfsdk:"value_from"` +} + +type jobDefinitionMountPointModel struct { + ContainerPath types.String `tfsdk:"container_path"` + ReadOnly types.Bool `tfsdk:"read_only"` + SourceVolume types.String `tfsdk:"source_volume"` +} + +type jobDefinitionNetworkConfigurationModel struct { + AssignPublicIP types.Bool `tfsdk:"assign_public_ip"` +} + +type jobDefinitionResourceRequirementModel struct { + Type types.String `tfsdk:"type"` + Value types.String `tfsdk:"value"` +} + +type jobDefinitionRuntimePlatformModel struct { + CPUArchitecture types.String `tfsdk:"cpu_architecture"` + OperatingSystemFamily types.String `tfsdk:"operating_system_family"` +} + +type jobDefinitionUlimitModel struct { + HardLimit types.Int64 `tfsdk:"hard_limit"` + Name types.String `tfsdk:"name"` + SoftLimit types.Int64 `tfsdk:"soft_limit"` +} + +type jobDefinitionVolumeModel struct { + EFSVolumeConfiguration fwtypes.ListNestedObjectValueOf[jobDefinitionEFSVolumeConfigurationModel] `tfsdk:"efs_volume_configuration"` + Host fwtypes.ListNestedObjectValueOf[jobDefinitionHostModel] `tfsdk:"host"` + Name types.String `tfsdk:"name"` +} + +type jobDefinitionEFSVolumeConfigurationModel struct { + AuthorizationConfig fwtypes.ListNestedObjectValueOf[jobDefinitionEFSAuthorizationConfigModel] `tfsdk:"authorization_config"` + FileSystemID types.String `tfsdk:"file_system_id"` + RootDirectory types.String `tfsdk:"root_directory"` + TransitEncryption types.String `tfsdk:"transit_encryption"` + TransitEncryptionPort types.Int64 `tfsdk:"transit_encryption_port"` +} + +type jobDefinitionEFSAuthorizationConfigModel struct { + AccessPointID types.String `tfsdk:"access_point_id"` + IAM types.String `tfsdk:"iam"` +} + +type jobDefinitionHostModel struct { + SourcePath types.String `tfsdk:"source_path"` +} + +type 
jobDefinitionRetryStrategyModel struct { + Attempts types.Int64 `tfsdk:"attempts"` + EvaluateOnExit fwtypes.ListNestedObjectValueOf[jobDefinitionEvaluateOnExitModel] `tfsdk:"evaluate_on_exit"` +} + +type jobDefinitionEvaluateOnExitModel struct { + Action types.String `tfsdk:"action"` + OnExitCode types.String `tfsdk:"on_exit_code"` + OnReason types.String `tfsdk:"on_reason"` + OnStatusReason types.String `tfsdk:"on_status_reason"` +} + +type jobDefinitionJobTimeoutModel struct { + AttemptDurationSeconds types.Int64 `tfsdk:"attempt_duration_seconds"` +} diff --git a/internal/service/batch/job_definition_data_source_test.go b/internal/service/batch/job_definition_data_source_test.go new file mode 100644 index 000000000000..ed47a11162cc --- /dev/null +++ b/internal/service/batch/job_definition_data_source_test.go @@ -0,0 +1,201 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package batch_test + +import ( + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccBatchJobDefinitionDataSource_basicName(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + resourceName := "aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicName(rName, "1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.0.attempts", "10"), + resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), + ), + }, + { + Config: testAccJobDefinitionDataSourceConfig_basicNameRevision(rName, "2", 2), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "retry_strategy.0.attempts", "10"), + resource.TestCheckResourceAttr(dataSourceName, "revision", "1"), + ), + }, + { + Config: testAccJobDefinitionDataSourceConfig_basicARN(rName, "2"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "revision", "2"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN_NodeProperties(t *testing.T) { + ctx := 
acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARNNode(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.main_node", "0"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.node_range_properties.#", "2"), + resource.TestCheckResourceAttr(dataSourceName, "node_properties.0.node_range_properties.0.container.0.image", "busybox"), + ), + }, + }, + }) +} + +func TestAccBatchJobDefinitionDataSource_basicARN_EKSProperties(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BatchEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BatchEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionDataSourceConfig_basicARNEKS(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.0.pod_properties.0.containers.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "eks_properties.0.pod_properties.0.containers.0.image", "public.ecr.aws/amazonlinux/amazonlinux:1"), + resource.TestCheckResourceAttr(dataSourceName, "type", "container"), + ), + }, + }, + }) +} + +func testAccJobDefinitionDataSourceConfig_basicARN(rName string, increment string) string { + return acctest.ConfigCompose( + testAccJobDefinitionDataSourceConfig_container(rName, increment), + ` +data "aws_batch_job_definition" "test" { + arn = aws_batch_job_definition.test.arn +} +`) +} + +func testAccJobDefinitionDataSourceConfig_basicName(rName string, increment string) string { + return acctest.ConfigCompose( + testAccJobDefinitionDataSourceConfig_container(rName, increment), + fmt.Sprintf(` +data "aws_batch_job_definition" "test" { + name = %[1]q + + depends_on = [aws_batch_job_definition.test] +} +`, rName, increment)) +} + +func testAccJobDefinitionDataSourceConfig_basicNameRevision(rName string, increment string, revision int) string { + return acctest.ConfigCompose( + testAccJobDefinitionDataSourceConfig_container(rName, increment), + fmt.Sprintf(` +data "aws_batch_job_definition" "test" { + name = %[1]q + revision = %[2]d + + depends_on = [aws_batch_job_definition.test] +} +`, rName, revision)) +} + +func testAccJobDefinitionDataSourceConfig_container(rName string, increment string) string { + return fmt.Sprintf(` +resource "aws_batch_job_definition" "test" { + container_properties = jsonencode({ + command = ["echo", "test%[2]s"] + image = "busybox" + memory = 128 + vcpus = 1 + }) + name = %[1]q + type = "container" + retry_strategy { + attempts = 10 + } +} +`, rName, increment) +} + +func testAccJobDefinitionDataSourceConfig_basicARNNode(rName string) string { + return acctest.ConfigCompose( + 
testAccJobDefinitionConfig_NodeProperties(rName), ` +data "aws_batch_job_definition" "test" { + arn = aws_batch_job_definition.test.arn +}`) +} + +func testAccJobDefinitionDataSourceConfig_basicARNEKS(rName string) string { + return acctest.ConfigCompose( + testAccJobDefinitionConfig_EKSProperties_basic(rName), ` +data "aws_batch_job_definition" "test" { + arn = aws_batch_job_definition.test.arn +} +`) +} diff --git a/internal/service/batch/service_endpoints_gen_test.go b/internal/service/batch/service_endpoints_gen_test.go index 2aa345e8ff61..6859b93b210a 100644 --- a/internal/service/batch/service_endpoints_gen_test.go +++ b/internal/service/batch/service_endpoints_gen_test.go @@ -4,16 +4,17 @@ package batch_test import ( "context" + "errors" "fmt" "maps" - "net/url" "os" "path/filepath" "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -202,33 +203,69 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }, } - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase + t.Run("v1", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase - t.Run(name, func(t *testing.T) { - testEndpointCase(t, region, testcase, callService) - }) - } + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV1) + }) + } + }) + + t.Run("v2", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV2) + }) + } + }) } func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() + r := batch_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(batch_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), batch_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return err.Error() } - url, _ := url.Parse(ep.URL) + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI.String() +} + +func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { + t.Helper() + + var endpoint string - if url.Path == "" { - url.Path = "/" + client := meta.BatchClient(ctx) + + _, err := client.ListJobs(ctx, &batch_sdkv2.ListJobsInput{}, + func(opts *batch_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } - return url.String() + return endpoint } -func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { +func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { t.Helper() client := meta.BatchConn(ctx) diff --git a/internal/service/batch/service_package_gen.go b/internal/service/batch/service_package_gen.go index f1eae349c43d..caf9acc0fb92 100644 --- a/internal/service/batch/service_package_gen.go +++ b/internal/service/batch/service_package_gen.go @@ -5,6 +5,8 @@ package batch import ( "context" + aws_sdkv2 
"github.com/aws/aws-sdk-go-v2/aws" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" @@ -16,7 +18,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newJobDefinitionDataSource, + Name: "Job Definition", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { @@ -88,6 +95,17 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*b return batch_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*batch_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) + + return batch_sdkv2.NewFromConfig(cfg, func(o *batch_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil +} + func ServicePackage(ctx context.Context) conns.ServicePackage { return &servicePackage{} } diff --git a/internal/service/redshiftserverless/service_endpoints_gen_test.go b/internal/service/redshiftserverless/service_endpoints_gen_test.go new file mode 100644 index 000000000000..962bde08d867 --- /dev/null +++ b/internal/service/redshiftserverless/service_endpoints_gen_test.go @@ -0,0 +1,523 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. 
+ +package redshiftserverless_test + +import ( + "context" + "errors" + "fmt" + "maps" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + redshiftserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" + redshiftserverless_sdkv1 "github.com/aws/aws-sdk-go/service/redshiftserverless" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) string + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "redshiftserverless" + awsEnvVar = "AWS_ENDPOINT_URL_REDSHIFT_SERVERLESS" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "redshift_serverless" +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + const region = "us-west-2" //lintignore:AWSAT003 + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(region), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + 
"service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + } + + t.Run("v1", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV1) + }) + } + }) + + t.Run("v2", func(t *testing.T) { + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, region, testcase, callServiceV2) + }) + } + }) +} + +func defaultEndpoint(region string) string { + r := redshiftserverless_sdkv2.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), redshiftserverless_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) + if err != nil { + return err.Error() + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI.String() +} + +func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { + t.Helper() + + var endpoint string + + client := meta.RedshiftServerlessClient(ctx) + + _, err := client.ListNamespaces(ctx, &redshiftserverless_sdkv2.ListNamespacesInput{}, + func(opts *redshiftserverless_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &endpoint), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return endpoint +} + +func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) string { + t.Helper() + + client := meta.RedshiftServerlessConn(ctx) + + req, _ := client.ListNamespacesRequest(&redshiftserverless_sdkv1.ListNamespacesInput{}) + + req.HTTPRequest.URL.Path = "/" + + endpoint := req.HTTPRequest.URL.String() + + return endpoint +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config["endpoints"]; !ok { + setup.config["endpoints"] = []any{ + map[string]any{}, + } + } + endpoints := 
setup.config["endpoints"].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func expectDefaultEndpoint(region string) caseExpectations { + return caseExpectations{ + endpoint: defaultEndpoint(region), + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + } +} + +func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + ctx := context.Background() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + "access_key": servicemocks.MockStaticAccessKey, + "secret_key": servicemocks.MockStaticSecretKey, + "region": region, + "skip_credentials_validation": true, + "skip_requesting_account_id": true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config["profile"] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := provider.New(ctx) + if err != nil { + t.Fatal(err) + } + + expectedDiags := testcase.expected.diags + expectedDiags = append( + expectedDiags, + errs.NewWarningDiagnostic( + "AWS account ID not found for provider", + "See https://registry.terraform.io/providers/hashicorp/aws/latest/docs#skip_requesting_account_id for implications.", + ), + ) + + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + endpoint := callF(ctx, t, meta) + + if endpoint != testcase.expected.endpoint { + t.Errorf("expected endpoint %q, got %q", testcase.expected.endpoint, endpoint) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next 
middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +var errCancelOperation = fmt.Errorf("Test: Cancelling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + buf.WriteString(fmt.Sprintf("endpoint_url = %s\n", config.baseUrl)) + } + + if config.serviceUrl != "" { + buf.WriteString(fmt.Sprintf(` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint)) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)["shared_config_files"]; !ok { + (*config)["shared_config_files"] = []any{file.Name()} + } else { + (*config)["shared_config_files"] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/names/data/names_data.csv b/names/data/names_data.csv index 492ce36e5d7a..0da374d8d812 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -34,7 +34,7 @@ autoscaling-plans,autoscalingplans,autoscalingplans,autoscalingplans,,autoscalin ,,,,,,,,,,,,,,,,,Backint Agent for SAP HANA,AWS,x,,,,,,,,,No SDK support backup,backup,backup,backup,,backup,,,Backup,Backup,,1,,,aws_backup_,,backup_,Backup,AWS,,,,,,,Backup,ListBackupPlans,, backup-gateway,backupgateway,backupgateway,backupgateway,,backupgateway,,,BackupGateway,BackupGateway,,1,,,aws_backupgateway_,,backupgateway_,Backup Gateway,AWS,,x,,,,,Backup Gateway,,, -batch,batch,batch,batch,,batch,,,Batch,Batch,,1,,,aws_batch_,,batch_,Batch,AWS,,,,,,,Batch,ListJobs,, 
+batch,batch,batch,batch,,batch,,,Batch,Batch,,1,2,,aws_batch_,,batch_,Batch,AWS,,,,,,,Batch,ListJobs,, bedrock,bedrock,bedrock,bedrock,,bedrock,,,Bedrock,Bedrock,,,2,,aws_bedrock_,,bedrock_,Amazon Bedrock,Amazon,,,,,,,Bedrock,ListFoundationModels,, bedrock-agent,bedrockagent,bedrockagent,bedrockagent,,bedrockagent,,,BedrockAgent,BedrockAgent,,,2,,aws_bedrockagent_,,bedrock_agent_,Agents for Amazon Bedrock,Amazon,,,,,,,Bedrock Agent,ListAgents,, billingconductor,billingconductor,billingconductor,,,billingconductor,,,BillingConductor,BillingConductor,,1,,,aws_billingconductor_,,billingconductor_,Billing Conductor,AWS,,x,,,,,billingconductor,,, @@ -389,4 +389,4 @@ workspaces-web,workspacesweb,workspacesweb,workspacesweb,,workspacesweb,,,WorkSp xray,xray,xray,xray,,xray,,,XRay,XRay,,,2,,aws_xray_,,xray_,X-Ray,AWS,,,,,,,XRay,ListResourcePolicies,, verifiedpermissions,verifiedpermissions,verifiedpermissions,verifiedpermissions,,verifiedpermissions,,,VerifiedPermissions,VerifiedPermissions,,,2,,aws_verifiedpermissions_,,verifiedpermissions_,Verified Permissions,Amazon,,,,,,,VerifiedPermissions,ListPolicyStores,, codecatalyst,codecatalyst,codecatalyst,codecatalyst,,codecatalyst,,,CodeCatalyst,CodeCatalyst,,,2,,aws_codecatalyst_,,codecatalyst_,CodeCatalyst,Amazon,,,,,,,CodeCatalyst,ListAccessTokens,, -mediapackagev2,mediapackagev2,mediapackagev2,mediapackagev2,,mediapackagev2,,,MediaPackageV2,MediaPackageV2,,,2,aws_media_packagev2_,aws_mediapackagev2_,,media_packagev2_,Elemental MediaPackage Version 2,AWS,,,,,,,MediaPackageV2,ListChannelGroups,, \ No newline at end of file +mediapackagev2,mediapackagev2,mediapackagev2,mediapackagev2,,mediapackagev2,,,MediaPackageV2,MediaPackageV2,,,2,aws_media_packagev2_,aws_mediapackagev2_,,media_packagev2_,Elemental MediaPackage Version 2,AWS,,,,,,,MediaPackageV2,ListChannelGroups,, diff --git a/names/names.go b/names/names.go index 1de5196518b0..91a76aa4c117 100644 --- a/names/names.go +++ b/names/names.go @@ -33,6 +33,7 @@ const ( AppRunnerEndpointID = "apprunner" AthenaEndpointID = "athena" AuditManagerEndpointID = "auditmanager" + BatchEndpointID = "batch" BedrockEndpointID = "bedrock" BudgetsEndpointID = "budgets" ChimeSDKVoiceEndpointID = "voice-chime" diff --git a/website/docs/d/batch_job_definition.html.markdown b/website/docs/d/batch_job_definition.html.markdown new file mode 100644 index 000000000000..b45a798013f8 --- /dev/null +++ b/website/docs/d/batch_job_definition.html.markdown @@ -0,0 +1,274 @@ +--- +subcategory: "Batch" +layout: "aws" +page_title: "AWS: aws_batch_job_definition" +description: |- + Terraform data source for managing an AWS Batch Job Definition. +--- + +# Data Source: aws_batch_job_definition + +Terraform data source for managing an AWS Batch Job Definition. + +## Example Usage + +### Lookup via ARN + +```terraform +data "aws_batch_job_definition" "arn" { + arn = "arn:aws:batch:us-east-1:012345678910:job-definition/example" +} +``` + +### Lookup via Name + +```terraform +data "aws_batch_job_definition" "name" { + name = "example" + revision = 2 +} +``` + +## Argument Reference + +The following arguments are optional: + +* `arn` - ARN of the job definition. Exactly one of `arn` or `name` must be specified. +* `revision` - The revision of the job definition. +* `name` - The name of the job definition. It can be up to 128 letters long.
It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_). +* `status` - The status of the job definition. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `container_orchestration_type` - The orchestration type of the compute environment. +* `scheduling_priority` - The scheduling priority for jobs that are submitted with this job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority. +* `id` - The ARN of the job definition. +* `eks_properties` - An [object](#eks_properties) with various properties that are specific to Amazon EKS based jobs. This must not be specified for Amazon ECS based job definitions. +* `node_properties` - An [object](#node_properties) with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the AWS Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties. +* `retry_strategy` - The [retry strategy](#retry_strategy) to use for failed jobs that are submitted with this job definition. Any retry strategy that's specified during a SubmitJob operation overrides the retry strategy defined here. If a job is terminated due to a timeout, it isn't retried. +* `timeout` - The [timeout configuration](#timeout) for jobs that are submitted with this job definition, after which AWS Batch terminates your jobs if they have not finished. If a job is terminated due to a timeout, it isn't retried. The minimum value for the timeout is 60 seconds. + +### eks_properties + +* `pod_properties` - The [properties](#pod_properties) for the Kubernetes pod resources of a job. + +### pod_properties + +* `dns_policy` - The DNS policy for the pod. The default value is ClusterFirst. If the hostNetwork parameter is not specified, the default is ClusterFirstWithHostNet. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. +* `host_network` - Indicates if the pod uses the host's network IP address. The default value is true. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. +* `service_account_name` - The name of the service account that's used to run the pod. +* `containers` - The properties of the container that's used on the Amazon EKS pod. Array of [EksContainer](#eks_container) objects. +* `metadata` - [Metadata](#eks_metadata) about the Kubernetes pod. +* `volumes` - Specifies the volumes for a job definition that uses Amazon EKS resources. Array of [EksVolume](#eks_volumes) objects. + +### eks_container + +* `args` - An array of arguments to the entrypoint. +* `command` - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. +* `env` - The environment variables to pass to a container. Array of [EksContainerEnvironmentVariable](#eks_environment) objects. +* `image` - The Docker image used to start the container. +* `image_pull_policy` - The image pull policy for the container.
+* `name` - The name of the container. +* `resources` - The type and amount of [resources](#eks_resources) to assign to a container. +* `security_context` - The [security context](#eks_security_context) for a job. +* `volume_mounts` - The [volume mounts](#eks_volume_mounts) for the container. + +### eks_metadata + +* `labels` - Key-value pairs used to identify, sort, and organize Kubernetes resources. + +### eks_volumes + +* `name` - The name of the volume. The name must be allowed as a DNS subdomain name. +* `empty_dir` - Specifies the configuration of a Kubernetes [emptyDir volume](#eks_volume_empty_dir). +* `host_path` - Specifies the configuration of a Kubernetes [hostPath volume](#eks_volume_host_path). +* `secret` - Specifies the configuration of a Kubernetes [secret volume](#eks_volume_secret). + +### eks_volume_empty_dir + +* `medium` - The medium to store the volume. +* `size_limit` - The maximum size of the volume. By default, there's no maximum size defined. + +### eks_volume_host_path + +* `path` - The path of the file or directory on the host to mount into containers on the pod. + +### eks_volume_secret + +* `secret_name` - The name of the secret. The name must be allowed as a DNS subdomain name. +* `optional` - Specifies whether the secret or the secret's keys must be defined. + +### eks_environment + +* `name` - The name of the environment variable. +* `value` - The value of the environment variable. + +### eks_resources + +* `limits` - The type and quantity of the resources to reserve for the container. +* `requests` - The type and quantity of the resources to request for the container. + +### eks_security_context + +* `privileged` - When this parameter is true, the container is given elevated permissions on the host container instance. The level of permissions is similar to the root user permissions. The default value is false. +* `read_only_root_file_system` - When this parameter is true, the container is given read-only access to its root file system. The default value is false. +* `run_as_user` - When this parameter is specified, the container is run as the specified user ID (uid). If this parameter isn't specified, the default is the user that's specified in the image metadata. +* `run_as_group` - When this parameter is specified, the container is run as the specified group ID (gid). If this parameter isn't specified, the default is the group that's specified in the image metadata. +* `run_as_non_root` - When this parameter is specified, the container is run as a user with a uid other than 0. If this parameter isn't specified, no such rule is enforced. + +### eks_volume_mounts + +* `mount_path` - The path on the container where the volume is mounted. +* `name` - The name of the volume mount. +* `read_only` - If this value is true, the container has read-only access to the volume. Otherwise, the container can write to the volume. + +### node_properties + +* `main_node` - Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer than the number of nodes. +* `node_range_properties` - A list of node ranges and their [properties](#node_range_properties) that are associated with a multi-node parallel job. +* `num_nodes` - The number of nodes that are associated with a multi-node parallel job. + +### node_range_properties + +* `target_nodes` - The range of nodes, using node index values. A range of 0:3 indicates nodes with index values of 0 through 3. +* `container` - The [container details](#container) for the node range. See the example below for referencing these nested values.
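+ +As an illustration (the data source label, the `example-multinode` job definition name, and the output name below are placeholders, and this assumes the job definition being looked up is a multi-node parallel job), nested node values can be read with standard index syntax: + +```terraform +data "aws_batch_job_definition" "multinode" { +  name = "example-multinode" +} + +output "first_node_range_image" { +  value = data.aws_batch_job_definition.multinode.node_properties[0].node_range_properties[0].container[0].image +} +```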
+
+### container
+
+* `command` - The command that's passed to the container.
+* `environment` - The [environment](#environment) variables to pass to a container.
+* `ephemeral_storage` - The amount of [ephemeral storage](#ephemeral_storage) to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate.
+* `execution_role_arn` - The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role.
+* `fargate_platform_configuration` - The [platform configuration](#fargate_platform_configuration) for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.
+* `image` - The image used to start a container.
+* `instance_type` - The instance type to use for a multi-node parallel job.
+* `job_role_arn` - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions.
+* `linux_parameters` - [Linux-specific modifications](#linux_parameters) that are applied to the container.
+* `log_configuration` - The [log configuration](#log_configuration) specification for the container.
+* `mount_points` - The [mount points](#mount_points) for data volumes in your container.
+* `network_configuration` - The [network configuration](#network_configuration) for jobs that are running on Fargate resources.
+* `privileged` - When this parameter is true, the container is given elevated permissions on the host container instance (similar to the root user).
+* `readonly_root_filesystem` - When this parameter is true, the container is given read-only access to its root file system.
+* `resource_requirements` - The type and amount of [resources](#resource_requirements) to assign to a container.
+* `runtime_platform` - An [object](#runtime_platform) that represents the compute environment architecture for AWS Batch jobs on Fargate.
+* `secrets` - The [secrets](#secrets) for the container.
+* `ulimits` - A list of [ulimits](#ulimits) to set in the container.
+* `user` - The user name to use inside the container.
+* `volumes` - A list of data [volumes](#volumes) used in a job.
+
+### environment
+
+* `name` - The name of the key-value pair.
+* `value` - The value of the key-value pair.
+
+### ephemeral_storage
+
+* `size_in_gb` - The total amount, in GiB, of ephemeral storage to set for the task.
+
+### fargate_platform_configuration
+
+* `platform_version` - The AWS Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources.
+
+### linux_parameters
+
+* `init_process_enabled` - If true, run an init process inside the container that forwards signals and reaps processes.
+* `max_swap` - The total amount of swap memory (in MiB) a container can use.
+* `shared_memory_size` - The value for the size (in MiB) of the `/dev/shm` volume.
+* `swappiness` - You can use this parameter to tune a container's memory swappiness behavior.
+* `devices` - Any of the [host devices](#devices) to expose to the container.
+* `tmpfs` - The container path, mount options, and size (in MiB) of the [tmpfs](#tmpfs) mount.
+
+### log_configuration
+
+* `options` - The configuration options to send to the log driver.
+* `log_driver` - The log driver to use for the container.
+* `secret_options` - The [secrets](#secret_options) to pass to the log configuration.
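+
+The container, Linux parameter, and log configuration attributes above are documented under each node range. A hedged sketch (the ARN, account ID, and names are placeholders) that surfaces them without guessing at list indices:
+
+```terraform
+# Hypothetical lookup of a multi-node parallel job definition by ARN.
+data "aws_batch_job_definition" "mnp" {
+  arn = "arn:aws:batch:us-east-1:123456789012:job-definition/example-mnp:1"
+}
+
+# node_properties contains the node ranges, and each range carries the container
+# details (image, log_configuration, linux_parameters, ...) described above.
+output "mnp_node_properties" {
+  value = data.aws_batch_job_definition.mnp.node_properties
+}
+```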
+
+### network_configuration
+
+* `assign_public_ip` - Indicates whether the job has a public IP address.
+
+### mount_points
+
+* `container_path` - The path on the container where the host volume is mounted.
+* `read_only` - If this value is true, the container has read-only access to the volume.
+* `source_volume` - The name of the volume to mount.
+
+### resource_requirements
+
+* `type` - The type of resource to assign to a container. The supported resources include `GPU`, `MEMORY`, and `VCPU`.
+* `value` - The quantity of the specified resource to reserve for the container.
+
+### secrets
+
+* `name` - The name of the secret.
+* `value_from` - The secret to expose to the container.
+
+### ulimits
+
+* `hard_limit` - The hard limit for the ulimit type.
+* `name` - The type of the ulimit.
+* `soft_limit` - The soft limit for the ulimit type.
+
+### runtime_platform
+
+* `cpu_architecture` - The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64.
+* `operating_system_family` - The operating system for the compute environment.
+
+### secret_options
+
+* `name` - The name of the secret.
+* `value_from` - The secret to expose to the container. The supported values are either the full Amazon Resource Name (ARN) of the AWS Secrets Manager secret or the full ARN of the parameter in the AWS Systems Manager Parameter Store.
+
+### devices
+
+* `host_path` - The path for the device on the host container instance.
+* `container_path` - The path inside the container that's used to expose the host device. By default, the hostPath value is used.
+* `permissions` - The explicit permissions to provide to the container for the device.
+
+### tmpfs
+
+* `container_path` - The absolute file path in the container where the tmpfs volume is mounted.
+* `size` - The size (in MiB) of the tmpfs volume.
+* `mount_options` - The list of tmpfs volume mount options.
+
+### volumes
+
+* `name` - The name of the volume.
+* `host` - The contents of the host parameter determine whether your data volume persists on the host container instance and where it's stored.
+* `efs_volume_configuration` - This [parameter](#efs_volume_configuration) is specified when you're using an Amazon Elastic File System file system for job storage.
+
+### host
+
+* `source_path` - The path on the host container instance that's presented to the container.
+
+### efs_volume_configuration
+
+* `file_system_id` - The Amazon EFS file system ID to use.
+* `root_directory` - The directory within the Amazon EFS file system to mount as the root directory inside the host.
+* `transit_encryption` - Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server.
+* `transit_encryption_port` - The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server.
+* `authorization_config` - The [authorization configuration](#authorization_config) details for the Amazon EFS file system.
+
+### authorization_config
+
+* `access_point_id` - The Amazon EFS access point ID to use.
+* `iam` - Whether or not to use the AWS Batch job IAM role defined in a job definition when mounting the Amazon EFS file system.
+
+### retry_strategy
+
+* `attempts` - The number of times to move a job to the RUNNABLE status.
+* `evaluate_on_exit` - Array of up to 5 [objects](#evaluate_on_exit) that specify the conditions where jobs are retried or failed.
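+
+Because `retry_strategy` (like the other nested attributes) may be absent on a given definition, reading into it is best guarded. A small sketch, assuming the nested blocks are exposed as lists of objects; the names are placeholders:
+
+```terraform
+data "aws_batch_job_definition" "example" {
+  name   = "example"
+  status = "ACTIVE"
+}
+
+# try() returns null instead of failing when no retry strategy is configured
+# or when the nesting differs from this assumption.
+output "retry_attempts" {
+  value = try(data.aws_batch_job_definition.example.retry_strategy[0].attempts, null)
+}
+```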
+
+### evaluate_on_exit
+
+* `action` - Specifies the action to take if all of the specified conditions (onStatusReason, onReason, and onExitCode) are met. The values aren't case sensitive.
+* `on_exit_code` - Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job.
+* `on_reason` - Contains a glob pattern to match against the Reason returned for a job.
+* `on_status_reason` - Contains a glob pattern to match against the StatusReason returned for a job.
+
+### timeout
+
+* `attempt_duration_seconds` - The job timeout time (in seconds) that's measured from the job attempt's startedAt timestamp.
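+
+Finally, a short sketch of reading the timeout described above alongside the definition's ARN; `id` holds the ARN per the attribute reference, and the data source label and output names are illustrative only:
+
+```terraform
+data "aws_batch_job_definition" "by_name" {
+  name   = "example"
+  status = "ACTIVE"
+}
+
+# The ARN can be handed to anything that submits Batch jobs.
+output "job_definition_arn" {
+  value = data.aws_batch_job_definition.by_name.id
+}
+
+# try() guards against definitions that don't configure a timeout,
+# assuming timeout is exposed as a list of objects.
+output "attempt_duration_seconds" {
+  value = try(data.aws_batch_job_definition.by_name.timeout[0].attempt_duration_seconds, null)
+}
+```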