From bd781d42cda66a57bfcb3549d9fcc9eb60114f3b Mon Sep 17 00:00:00 2001
From: Jordan
Date: Sun, 16 Apr 2023 15:02:29 +0100
Subject: [PATCH 01/65] Support EventBridge Pipes parameters

* Add support for EventBridge Enrichment Parameters
* Add support for EventBridge Target Parameters
* Add support for EventBridge Source Parameters
* Due to the number of parameters, add unit tests to ensure each value is
  mapped to the correct struct field
* Update docs to describe the new parameters
---
 go.mod                                        | 3 +
 go.sum                                        | 5 +
 internal/service/pipes/README.md              | 1 +
 .../service/pipes/enrichment_parameters.go    | 185 ++
 .../pipes/enrichment_parameters_test.go       | 143 +
 internal/service/pipes/flex.go                | 158 +-
 internal/service/pipes/pipe.go                | 187 +-
 internal/service/pipes/pipe_test.go           | 2881 +++++++++++++++--
 internal/service/pipes/source_parameters.go   | 1505 +++++++++
 .../service/pipes/source_parameters_test.go   | 1500 +++++++++
 internal/service/pipes/sweep.go               | 2 -
 internal/service/pipes/target_parameters.go   | 2061 ++++++++++++
 .../service/pipes/target_parameters_test.go   | 1110 +++++++
 .../pipes/test-fixtures/lambdatest.zip        | Bin 0 -> 342 bytes
 website/docs/r/pipes_pipe.html.markdown       | 442 ++-
 15 files changed, 9644 insertions(+), 539 deletions(-)
 create mode 100644 internal/service/pipes/enrichment_parameters.go
 create mode 100644 internal/service/pipes/enrichment_parameters_test.go
 create mode 100644 internal/service/pipes/source_parameters.go
 create mode 100644 internal/service/pipes/source_parameters_test.go
 create mode 100644 internal/service/pipes/target_parameters.go
 create mode 100644 internal/service/pipes/target_parameters_test.go
 create mode 100644 internal/service/pipes/test-fixtures/lambdatest.zip

diff --git a/go.mod b/go.mod
index 0c0d66969be..31f42105b08 100644
--- a/go.mod
+++ b/go.mod
@@ -68,6 +68,7 @@ require (
 	github.com/mitchellh/go-testing-interface v1.14.1
 	github.com/pquerna/otp v1.4.0
 	github.com/shopspring/decimal v1.3.1
+	github.com/stretchr/testify v1.8.2
 	golang.org/x/crypto v0.8.0
 	golang.org/x/exp v0.0.0-20230206171751-46f607a40771
 	golang.org/x/tools v0.6.0
@@ -98,6 +99,7 @@ require (
 	github.com/bgentry/speakeasy v0.1.0 // indirect
 	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
 	github.com/cloudflare/circl v1.3.2 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/evanphx/json-patch v0.5.2 // indirect
 	github.com/fatih/color v1.14.1 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
@@ -122,6 +124,7 @@ require (
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/oklog/run v1.1.0 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/posener/complete v1.1.1 // indirect
 	github.com/rogpeppe/go-internal v1.9.0 // indirect
 	github.com/spf13/cast v1.3.1 // indirect
diff --git a/go.sum b/go.sum
index 363c24a3e3c..9c074913092 100644
--- a/go.sum
+++ b/go.sum
@@ -329,14 +329,19 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= diff --git a/internal/service/pipes/README.md b/internal/service/pipes/README.md index 5eff3285fc5..d9709ed6b24 100644 --- a/internal/service/pipes/README.md +++ b/internal/service/pipes/README.md @@ -3,3 +3,4 @@ * AWS Provider: [Contribution Guide](https://hashicorp.github.io/terraform-provider-aws/#contribute) * Service User Guide: [Amazon EventBridge Pipes](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html) * Service API Guide: [Welcome](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/Welcome.html) + diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go new file mode 100644 index 00000000000..90a75e65786 --- /dev/null +++ b/internal/service/pipes/enrichment_parameters.go @@ -0,0 +1,185 @@ +package pipes + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/flex" +) + +var enrichment_parameters_schema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), + }, + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + }, + }, + }, + "path_parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_string": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + 
ValidateFunc: validation.StringLenBetween(0, 512), + }, + }, + }, + }, + }, + }, + }, + }, + }, +} + +func expandEnrichmentParameters(config []interface{}) *types.PipeEnrichmentParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeEnrichmentParameters + for _, c := range config { + param := c.(map[string]interface{}) + if val, ok := param["input_template"].(string); ok && val != "" { + parameters.InputTemplate = aws.String(val) + } + if val, ok := param["http_parameters"]; ok { + parameters.HttpParameters = expandEnrichmentHTTPParameters(val.([]interface{})) + } + } + return ¶meters +} + +func expandEnrichmentHTTPParameters(config []interface{}) *types.PipeEnrichmentHttpParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeEnrichmentHttpParameters + for _, c := range config { + param := c.(map[string]interface{}) + if val, ok := param["path_parameters"]; ok { + parameters.PathParameterValues = flex.ExpandStringValueList(val.([]interface{})) + } + + if val, ok := param["header"]; ok { + headers := map[string]string{} + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + if key, ok := valueParam["key"].(string); ok && key != "" { + if value, ok := valueParam["value"].(string); ok && value != "" { + headers[key] = value + } + } + } + } + if len(headers) > 0 { + parameters.HeaderParameters = headers + } + } + + if val, ok := param["query_string"]; ok { + queryStrings := map[string]string{} + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + if key, ok := valueParam["key"].(string); ok && key != "" { + if value, ok := valueParam["value"].(string); ok && value != "" { + queryStrings[key] = value + } + } + } + } + if len(queryStrings) > 0 { + parameters.QueryStringParameters = queryStrings + } + } + } + return ¶meters +} + +func flattenEnrichmentParameters(enrichmentParameters *types.PipeEnrichmentParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if enrichmentParameters.InputTemplate != nil { + config["input_template"] = *enrichmentParameters.InputTemplate + } + + if enrichmentParameters.HttpParameters != nil { + httpParameters := make(map[string]interface{}) + + var headerParameters []map[string]interface{} + for key, value := range enrichmentParameters.HttpParameters.HeaderParameters { + header := make(map[string]interface{}) + header["key"] = key + header["value"] = value + headerParameters = append(headerParameters, header) + } + httpParameters["header"] = headerParameters + + var queryStringParameters []map[string]interface{} + for key, value := range enrichmentParameters.HttpParameters.QueryStringParameters { + queryString := make(map[string]interface{}) + queryString["key"] = key + queryString["value"] = value + queryStringParameters = append(queryStringParameters, queryString) + } + httpParameters["query_string"] = queryStringParameters + httpParameters["path_parameters"] = flex.FlattenStringValueList(enrichmentParameters.HttpParameters.PathParameterValues) + + config["http_parameters"] = []map[string]interface{}{httpParameters} + } + + if len(config) == 0 { + return nil + } + + result := []map[string]interface{}{config} + return result +} diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go new file mode 100644 index 00000000000..8b3d80af451 --- /dev/null +++ 
b/internal/service/pipes/enrichment_parameters_test.go @@ -0,0 +1,143 @@ +package pipes + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +func Test_expandEnrichmentParameters(t *testing.T) { + tests := map[string]struct { + config map[string]interface{} + expected *types.PipeEnrichmentParameters + }{ + "input_template config": { + config: map[string]interface{}{ + "input_template": "some template", + }, + expected: &types.PipeEnrichmentParameters{ + InputTemplate: aws.String("some template"), + }, + }, + "http_parameters config": { + config: map[string]interface{}{ + "http_parameters": []interface{}{ + map[string]interface{}{ + "path_parameters": []interface{}{"a", "b"}, + "header": []interface{}{ + map[string]interface{}{ + "key": "key1", + "value": "value1", + }, + map[string]interface{}{ + "key": "key2", + "value": "value2", + }, + }, + "query_string": []interface{}{ + map[string]interface{}{ + "key": "key3", + "value": "value3", + }, + map[string]interface{}{ + "key": "key4", + "value": "value4", + }, + }, + }, + }, + }, + expected: &types.PipeEnrichmentParameters{ + HttpParameters: &types.PipeEnrichmentHttpParameters{ + PathParameterValues: []string{"a", "b"}, + HeaderParameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + QueryStringParameters: map[string]string{ + "key3": "value3", + "key4": "value4", + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := expandEnrichmentParameters([]interface{}{tt.config}) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func Test_flattenEnrichmentParameters(t *testing.T) { + tests := map[string]struct { + config *types.PipeEnrichmentParameters + expected []map[string]interface{} + }{ + "input_template config": { + config: &types.PipeEnrichmentParameters{ + InputTemplate: aws.String("some template"), + }, + expected: []map[string]interface{}{ + { + "input_template": "some template", + }, + }, + }, + "http_parameters config": { + config: &types.PipeEnrichmentParameters{ + HttpParameters: &types.PipeEnrichmentHttpParameters{ + PathParameterValues: []string{"a", "b"}, + HeaderParameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + QueryStringParameters: map[string]string{ + "key3": "value3", + "key4": "value4", + }, + }, + }, + expected: []map[string]interface{}{ + { + "http_parameters": []map[string]interface{}{ + { + "path_parameters": []interface{}{"a", "b"}, + "header": []map[string]interface{}{ + { + "key": "key1", + "value": "value1", + }, + { + "key": "key2", + "value": "value2", + }, + }, + "query_string": []map[string]interface{}{ + { + "key": "key3", + "value": "value3", + }, + { + "key": "key4", + "value": "value4", + }, + }, + }, + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := flattenEnrichmentParameters(tt.config) + + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/internal/service/pipes/flex.go b/internal/service/pipes/flex.go index b3e0de87060..63287740210 100644 --- a/internal/service/pipes/flex.go +++ b/internal/service/pipes/flex.go @@ -1,146 +1,40 @@ package pipes -import ( - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" -) - -func expandFilter(tfMap map[string]interface{}) *types.Filter { - if tfMap == nil { - return nil - } - - output := &types.Filter{} - - if v, ok := tfMap["pattern"].(string); ok && len(v) > 0 { - output.Pattern 
= aws.String(v) - } - - return output -} - -func flattenFilter(apiObject types.Filter) map[string]interface{} { - m := map[string]interface{}{} - - if v := apiObject.Pattern; v != nil { - m["pattern"] = aws.ToString(v) - } - - return m -} - -func expandFilters(tfList []interface{}) []types.Filter { - if len(tfList) == 0 { - return nil - } - - var s []types.Filter - - for _, v := range tfList { - a := expandFilter(v.(map[string]interface{})) - - if a == nil { - continue +func expandString(key string, param map[string]interface{}) *string { + if val, ok := param[key]; ok { + if value, ok := val.(string); ok { + if value != "" { + return &value + } } - - s = append(s, *a) } - - return s + return nil } -func flattenFilters(apiObjects []types.Filter) []interface{} { - if len(apiObjects) == 0 { - return nil - } - - var l []interface{} - - for _, apiObject := range apiObjects { - l = append(l, flattenFilter(apiObject)) - } - - return l -} - -func expandFilterCriteria(tfMap map[string]interface{}) *types.FilterCriteria { - if tfMap == nil { - return nil - } - - output := &types.FilterCriteria{} - - if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { - output.Filters = expandFilters(v) - } - - return output -} - -func flattenFilterCriteria(apiObject *types.FilterCriteria) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - m["filter"] = flattenFilters(apiObject.Filters) - - return m -} - -func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceParameters { - if tfMap == nil { - return nil - } - - a := &types.PipeSourceParameters{} - - if v, ok := tfMap["filter_criteria"].([]interface{}); ok { - a.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) - } - - return a -} - -func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - if v := apiObject.FilterCriteria; v != nil { - m["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} +func expandInt32(key string, param map[string]interface{}) *int32 { + if val, ok := param[key]; ok { + if value, ok := val.(int); ok { + i := int32(value) + return &i + } } - - return m + return nil } -func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetParameters { - if tfMap == nil { - return nil - } - - a := &types.PipeTargetParameters{} - - if v, ok := tfMap["input_template"].(string); ok { - a.InputTemplate = aws.String(v) +func expandBool(key string, param map[string]interface{}) bool { + if val, ok := param[key]; ok { + if value, ok := val.(bool); ok { + return value + } } - - return a + return false } -func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} { - if apiObject == nil { - return nil - } - - m := map[string]interface{}{} - - if v := apiObject.InputTemplate; v != nil { - m["input_template"] = aws.ToString(v) +func expandStringValue(key string, param map[string]interface{}) string { + if val, ok := param[key]; ok { + if value, ok := val.(string); ok { + return value + } } - - return m + return "" } diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 05f57f7354c..e6a4e125d7f 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -4,6 +4,7 @@ import ( "context" "errors" "log" + "regexp" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -63,7 +64,7 @@ func ResourcePipe() *schema.Resource { "enrichment": { Type: 
schema.TypeString, Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1600), + ValidateFunc: verify.ValidARN, }, "name": { Type: schema.TypeString, @@ -71,7 +72,10 @@ func ResourcePipe() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validation.StringLenBetween(1, 64), + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64), + validation.StringMatch(regexp.MustCompile(`^[\.\-_A-Za-z0-9]+`), ""), + ), }, "name_prefix": { Type: schema.TypeString, @@ -79,7 +83,10 @@ func ResourcePipe() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"name"}, - ValidateFunc: validation.StringLenBetween(1, 64-id.UniqueIDSuffixLength), + ValidateFunc: validation.All( + validation.StringLenBetween(1, 64-id.UniqueIDSuffixLength), + validation.StringMatch(regexp.MustCompile(`^[\.\-_A-Za-z0-9]+`), ""), + ), }, "role_arn": { Type: schema.TypeString, @@ -87,65 +94,24 @@ func ResourcePipe() *schema.Resource { ValidateFunc: verify.ValidARN, }, "source": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 1600), - }, - "source_parameters": { - Type: schema.TypeList, + Type: schema.TypeString, Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter_criteria": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressEmptyConfigurationBlock("source_parameters.0.filter_criteria"), - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeList, - Optional: true, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 4096), - }, - }, - }, - }, - }, - }, - }, - }, - }, + ForceNew: true, + ValidateFunc: validation.Any( + verify.ValidARN, + validation.StringMatch(regexp.MustCompile(`^smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1})?:(\d{12})?:(.+)$`), ""), + ), }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), "target": { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringLenBetween(1, 1600), - }, - "target_parameters": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "input_template": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 8192), - }, - }, - }, + ValidateFunc: verify.ValidARN, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "source_parameters": source_parameters_schema, + "target_parameters": target_parameters_schema, + "enrichment_parameters": enrichment_parameters_schema, }, } } @@ -167,20 +133,24 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf Target: aws.String(d.Get("target").(string)), } - if v, ok := d.Get("description").(string); ok { - input.Description = aws.String(v) + if v, ok := d.GetOk("enrichment_parameters"); ok { + input.EnrichmentParameters = expandEnrichmentParameters(v.([]interface{})) } - if v, ok := d.Get("enrichment").(string); ok && v != "" { - input.Enrichment = aws.String(v) + if v, ok := d.GetOk("source_parameters"); ok { + input.SourceParameters = 
expandSourceParameters(v.([]interface{})) } - if v, ok := d.Get("source_parameters").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.SourceParameters = expandPipeSourceParameters(v[0].(map[string]interface{})) + if v, ok := d.GetOk("target_parameters"); ok { + input.TargetParameters = expandTargetParameters(v.([]interface{})) } - if v, ok := d.Get("target_parameters").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.TargetParameters = expandPipeTargetParameters(v[0].(map[string]interface{})) + if v, ok := d.GetOk("description"); ok { + input.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("enrichment"); ok && v != "" { + input.Enrichment = aws.String(v.(string)) } output, err := conn.CreatePipe(ctx, input) @@ -222,20 +192,34 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("enrichment", output.Enrichment) d.Set("name", output.Name) d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) + d.Set("role_arn", output.RoleArn) + d.Set("source", output.Source) + d.Set("target", output.Target) - if v := output.SourceParameters; v != nil { - if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(v)}); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + if output.SourceParameters != nil { + params := flattenSourceParameters(output.SourceParameters) + if params != nil { + if err := d.Set("source_parameters", params); err != nil { + return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + } } } - d.Set("role_arn", output.RoleArn) - d.Set("source", output.Source) - d.Set("target", output.Target) + if output.EnrichmentParameters != nil { + params := flattenEnrichmentParameters(output.EnrichmentParameters) + if params != nil { + if err := d.Set("enrichment_parameters", params); err != nil { + return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + } + } + } - if v := output.TargetParameters; v != nil { - if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(v)}); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + if output.TargetParameters != nil { + params := flattenTargetParameters(output.TargetParameters) + if params != nil { + if err := d.Set("target_parameters", params); err != nil { + return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) + } } } @@ -252,43 +236,49 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf Name: aws.String(d.Id()), RoleArn: aws.String(d.Get("role_arn").(string)), Target: aws.String(d.Get("target").(string)), - - // Omitting the SourceParameters entirely is interpreted as "no change". - SourceParameters: &types.UpdatePipeSourceParameters{}, - TargetParameters: &types.PipeTargetParameters{}, } if d.HasChange("enrichment") { // Reset state in case it's a deletion. - input.Enrichment = aws.String("") + input.Enrichment = nil + } + + if d.HasChange("enrichment_parameters") { + // Reset state in case it's a deletion. + input.EnrichmentParameters = nil + } + + // Reset state in case it's a deletion. 
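+	// Omitting SourceParameters entirely is interpreted by the UpdatePipe API
+	// as "no change", and a parameter is only unset when it is sent as an
+	// empty object, so an empty FilterCriteria is sent first to clear any
+	// filters that were removed from the configuration.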
+ input.SourceParameters = &types.UpdatePipeSourceParameters{ + FilterCriteria: &types.FilterCriteria{ + Filters: nil, + }, } - if v, ok := d.Get("enrichment").(string); ok && v != "" { - input.Enrichment = aws.String(v) + // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. + input.TargetParameters = &types.PipeTargetParameters{ + InputTemplate: aws.String(""), } - if d.HasChange("source_parameters.0.filter_criteria") { - // To unset a parameter, it must be set to an empty object. Nulling a - // parameter will be interpreted as "no change". - input.SourceParameters.FilterCriteria = &types.FilterCriteria{} + if v, ok := d.GetOk("enrichment_parameters"); ok { + input.EnrichmentParameters = expandEnrichmentParameters(v.([]interface{})) } - if v, ok := d.Get("source_parameters.0.filter_criteria").([]interface{}); ok && len(v) > 0 && v[0] != nil { - input.SourceParameters.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + if v, ok := d.GetOk("source_parameters"); ok { + input.SourceParameters = expandSourceUpdateParameters(v.([]interface{})) } - if d.HasChange("target_parameters.0.input_template") { - input.TargetParameters.InputTemplate = aws.String("") + if v, ok := d.GetOk("target_parameters"); ok { + input.TargetParameters = expandTargetParameters(v.([]interface{})) } - if v, ok := d.Get("target_parameters.0.input_template").(string); ok { - input.TargetParameters.InputTemplate = aws.String(v) + if v, ok := d.GetOk("enrichment"); ok && v.(string) != "" { + input.Enrichment = aws.String(v.(string)) } log.Printf("[DEBUG] Updating EventBridge Pipes Pipe (%s): %#v", d.Id(), input) output, err := conn.UpdatePipe(ctx, input) - if err != nil { return create.DiagError(names.Pipes, create.ErrActionUpdating, ResNamePipe, d.Id(), err) } @@ -323,18 +313,3 @@ func resourcePipeDelete(ctx context.Context, d *schema.ResourceData, meta interf return nil } - -func suppressEmptyConfigurationBlock(key string) schema.SchemaDiffSuppressFunc { - return func(k, o, n string, d *schema.ResourceData) bool { - if k != key+".#" { - return false - } - - if o == "0" && n == "1" { - v := d.Get(key).([]interface{}) - return len(v) == 0 || v[0] == nil || len(v[0].(map[string]interface{})) == 0 - } - - return false - } -} diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index f85b8050724..88aec04c86f 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -55,7 +55,7 @@ func TestAccPipesPipe_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), - resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), ), }, { @@ -253,6 +253,16 @@ func TestAccPipesPipe_enrichment(t *testing.T) { name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" + headerKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + headerValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + headerKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + headerValueModified := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -264,10 +274,22 @@ func TestAccPipesPipe_enrichment(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_enrichment(name, 0), + Config: testAccPipeConfig_enrichment( + name, + 0, + headerKey, + headerValue, + queryStringKey, + queryStringValue, + ), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.0", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header.0.key", headerKey), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header.0.value", headerValue), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string.0.key", queryStringKey), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string.0.value", queryStringValue), ), }, { @@ -276,10 +298,22 @@ func TestAccPipesPipe_enrichment(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_enrichment(name, 1), + Config: testAccPipeConfig_enrichment( + name, + 1, + headerKeyModified, + headerValueModified, + queryStringKeyModified, + queryStringValueModified, + ), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.1", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header.0.key", headerKeyModified), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header.0.value", headerValueModified), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string.0.key", queryStringKeyModified), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string.0.value", queryStringValueModified), ), }, { @@ -363,18 +397,6 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - { - Config: testAccPipeConfig_sourceParameters_filterCriteria0(name), - Check: resource.ComposeTestCheckFunc( - testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "0"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, { Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test2"), Check: resource.ComposeTestCheckFunc( @@ -392,7 +414,6 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { Config: testAccPipeConfig_basic(name), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), ), }, { @@ 
-588,7 +609,7 @@ func TestAccPipesPipe_tags(t *testing.T) { }) } -func TestAccPipesPipe_target(t *testing.T) { +func TestAccPipesPipe_source_sqs_target_sqs(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -598,6 +619,16 @@ func TestAccPipesPipe_target(t *testing.T) { name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" + batchSize := 8 + batchWindow := 5 + dedupeID := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + messageGroupID := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + batchSizeModified := 9 + batchWindowModified := 6 + dedupeIDModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + messageGroupIDModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -609,10 +640,15 @@ func TestAccPipesPipe_target(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_source_sqs_target_sqs(name, batchSize, batchWindow, dedupeID, messageGroupID), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target_fifo", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue.0.message_deduplication_id", dedupeID), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue.0.message_group_id", messageGroupID), ), }, { @@ -621,10 +657,15 @@ func TestAccPipesPipe_target(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_target(name), + Config: testAccPipeConfig_source_sqs_target_sqs(name, batchSizeModified, batchWindowModified, dedupeIDModified, messageGroupIDModified), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target2", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target_fifo", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue.0.message_deduplication_id", dedupeIDModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue.0.message_group_id", messageGroupIDModified), ), }, { @@ -636,7 +677,7 @@ func TestAccPipesPipe_target(t *testing.T) { }) } -func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { +func TestAccPipesPipe_source_kinesis_target_kinesis(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping 
long-running test in short mode") @@ -646,6 +687,20 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" + batchSize := 10 + batchWindow := 5 + maxRecordAge := -1 + parallelization := 2 + retries := 3 + partitionKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + batchSizeModified := 11 + batchWindowModified := 6 + maxRecordAgeModified := 65 + parallelizationModified := 3 + retriesModified := 4 + partitionKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -657,10 +712,21 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.first"), + Config: testAccPipeConfig_source_kinesis_target_kinesis(name, batchSize, batchWindow, maxRecordAge, parallelization, retries, partitionKey), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.first"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_kinesis_stream.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_kinesis_stream.target", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.kinesis_stream.0.dead_letter_config.0.arn", "aws_sqs_queue.deadletter", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_record_age_in_seconds", fmt.Sprintf("%d", maxRecordAge)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.parallelization_factor", fmt.Sprintf("%d", parallelization)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_retry_attempts", fmt.Sprintf("%d", retries)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.starting_position", "AT_TIMESTAMP"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.on_partial_batch_item_failure", "AUTOMATIC_BISECT"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.starting_position_timestamp", "2023-01-01T00:00:00Z"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream.0.partition_key", partitionKey), ), }, { @@ -669,10 +735,22 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.second"), + Config: testAccPipeConfig_source_kinesis_target_kinesis(name, batchSizeModified, batchWindowModified, maxRecordAgeModified, parallelizationModified, retriesModified, partitionKeyModified), Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.second"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_kinesis_stream.source", "arn"), + 
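// The second step re-asserts every mutable Kinesis source parameter with the
+					// modified values; starting_position and starting_position_timestamp are
+					// create-time settings and are expected to stay unchanged between steps.
+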
resource.TestCheckResourceAttrPair(resourceName, "target", "aws_kinesis_stream.target", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.kinesis_stream.0.dead_letter_config.0.arn", "aws_sqs_queue.deadletter", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_record_age_in_seconds", fmt.Sprintf("%d", maxRecordAgeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.parallelization_factor", fmt.Sprintf("%d", parallelizationModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.maximum_retry_attempts", fmt.Sprintf("%d", retriesModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.starting_position", "AT_TIMESTAMP"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.on_partial_batch_item_failure", "AUTOMATIC_BISECT"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream.0.starting_position_timestamp", "2023-01-01T00:00:00Z"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream.0.partition_key", partitionKeyModified), ), }, { @@ -680,11 +758,82 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + }, + }) +} + +func TestAccPipesPipe_source_dynamo_target_cloudwatch_logs(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + batchSize := 8 + batchWindow := 5 + maxRecordAge := -1 + parallelization := 2 + retries := 3 + + batchSizeModified := 9 + batchWindowModified := 6 + maxRecordAgeModified := 65 + parallelizationModified := 3 + retriesModified := 4 + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_source_dynamo_target_cloudwatch_logs(name, batchSize, batchWindow, maxRecordAge, parallelization, retries), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), - resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.input_template"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_dynamodb_table.source", "stream_arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_log_group.target", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.dynamo_db_stream.0.dead_letter_config.0.arn", "aws_sqs_queue.deadletter", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.dynamo_db_stream.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.maximum_record_age_in_seconds", fmt.Sprintf("%d", maxRecordAge)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.parallelization_factor", fmt.Sprintf("%d", parallelization)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.maximum_retry_attempts", fmt.Sprintf("%d", retries)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.starting_position", "TRIM_HORIZON"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.on_partial_batch_item_failure", "AUTOMATIC_BISECT"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs.0.log_stream_name", name), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs.0.timestamp", "$.detail.timestamp"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_dynamo_target_cloudwatch_logs(name, batchSizeModified, batchWindowModified, maxRecordAgeModified, parallelizationModified, retriesModified), + Check: resource.ComposeTestCheckFunc( + + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_dynamodb_table.source", "stream_arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_log_group.target", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.dynamo_db_stream.0.dead_letter_config.0.arn", "aws_sqs_queue.deadletter", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.maximum_record_age_in_seconds", fmt.Sprintf("%d", maxRecordAgeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.parallelization_factor", fmt.Sprintf("%d", parallelizationModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.maximum_retry_attempts", fmt.Sprintf("%d", retriesModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.starting_position", "TRIM_HORIZON"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamo_db_stream.0.on_partial_batch_item_failure", "AUTOMATIC_BISECT"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs.0.log_stream_name", name), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs.0.timestamp", "$.detail.timestamp"), ), }, { @@ -696,98 +845,1840 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { }) } -func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_pipes_pipe" { - continue - } +func TestAccPipesPipe_source_active_mq_target_sqs(t *testing.T) { + ctx := acctest.Context(t) + if 
testing.Short() { + t.Skip("skipping long-running test in short mode") + } - _, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" - if tfresource.NotFound(err) { - continue - } + batchSize := 8 + batchWindow := 5 - if err != nil { - return err - } + batchSizeModified := 9 + batchWindowModified := 6 - return create.Error(names.Pipes, create.ErrActionCheckingDestroyed, tfpipes.ResNamePipe, rs.Primary.ID, errors.New("not destroyed")) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + acctest.PreCheckPartitionHasService(t, names.MQ) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID, names.MQ), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_active_mq_target_sqs(name, batchSize, batchWindow), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.queue", "test"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.active_mq_broker.0.credentials.0.basic_auth", "aws_secretsmanager_secret_version.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_active_mq_target_sqs(name, batchSizeModified, batchWindowModified), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.active_mq_broker.0.queue", "test"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.active_mq_broker.0.credentials.0.basic_auth", "aws_secretsmanager_secret_version.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - return nil +func TestAccPipesPipe_source_rabbit_mq_target_sqs(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") } -} -func testAccCheckPipeExists(ctx context.Context, name string, pipe *pipes.DescribePipeOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return 
create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not found")) - } + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" - if rs.Primary.ID == "" { - return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not set")) - } + batchSize := 8 + batchWindow := 5 - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() + batchSizeModified := 9 + batchWindowModified := 6 - output, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + acctest.PreCheckPartitionHasService(t, names.MQ) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID, names.MQ), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_rabbit_mq_target_sqs(name, batchSize, batchWindow), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.queue", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.virtual_host", "/vhost"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.rabbit_mq_broker.0.credentials.0.basic_auth", "aws_secretsmanager_secret_version.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_rabbit_mq_target_sqs(name, batchSizeModified, batchWindowModified), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.queue", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbit_mq_broker.0.virtual_host", "/vhost"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.rabbit_mq_broker.0.credentials.0.basic_auth", "aws_secretsmanager_secret_version.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - if err != nil { - return err - } +func TestAccPipesPipe_source_managed_streaming_kafka_target_sqs(t *testing.T) { + ctx := acctest.Context(t) + if 
testing.Short() { + t.Skip("skipping long-running test in short mode") + } - *pipe = *output + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" - return nil - } -} + batchSize := 8 + batchWindow := 5 -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() + batchSizeModified := 9 + batchWindowModified := 6 - input := &pipes.ListPipesInput{} - _, err := conn.ListPipes(ctx, input) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + acctest.PreCheckPartitionHasService(t, names.Kafka) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID, names.Kafka), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_managed_streaming_kafka_target_sqs(name, batchSize, batchWindow), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_msk_cluster.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.topic", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.consumer_group_id", "amazon-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.starting_position", "TRIM_HORIZON"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_managed_streaming_kafka_target_sqs(name, batchSizeModified, batchWindowModified), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_msk_cluster.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.topic", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.consumer_group_id", "amazon-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka.0.starting_position", "TRIM_HORIZON"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) +func TestAccPipesPipe_source_self_managed_streaming_kafka_target_sqs(t *testing.T) { 
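+	// Unlike the other sources, self-managed Kafka is addressed by an
+	// smk://host:port bootstrap-broker string rather than an ARN; the "source"
+	// attribute's validation regex was extended above to allow this form.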
+ ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") } - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" -const testAccPipeConfig_base = ` -data "aws_caller_identity" "main" {} -data "aws_partition" "main" {} + servers := "smk://test1:9092,test2:9092" + batchSize := 8 + batchWindow := 5 -resource "aws_iam_role" "test" { - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = { - Effect = "Allow" - Action = "sts:AssumeRole" - Principal = { - Service = "pipes.${data.aws_partition.main.dns_suffix}" - } - Condition = { - StringEquals = { - "aws:SourceAccount" = data.aws_caller_identity.main.account_id + batchSizeModified := 9 + batchWindowModified := 6 + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + acctest.PreCheckPartitionHasService(t, names.Kafka) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID, names.Kafka), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_self_managed_streaming_kafka_target_sqs(name, batchSize, batchWindow, servers), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source", servers), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.batch_size", fmt.Sprintf("%d", batchSize)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindow)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.topic", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.consumer_group_id", "self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.servers.0", "test:1234"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.self_managed_kafka.0.vpc.0.security_groups.0", "aws_security_group.test", "id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.vpc.0.subnets.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_self_managed_streaming_kafka_target_sqs(name, batchSizeModified, batchWindowModified, servers), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source", servers), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.batch_size", fmt.Sprintf("%d", batchSizeModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.maximum_batching_window_in_seconds", fmt.Sprintf("%d", batchWindowModified)), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.consumer_group_id", 
"self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.servers.0", "test:1234"), + resource.TestCheckResourceAttrPair(resourceName, "source_parameters.0.self_managed_kafka.0.vpc.0.security_groups.0", "aws_security_group.test", "id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka.0.vpc.0.subnets.#", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_batch_job(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + attempts := 2 + size := 3 + parameterKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + parameterValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + command := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + environmentName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + environmentValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + instanceType := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + attemptsModified := 4 + sizeModified := 5 + parameterKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + parameterValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commandModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + environmentNameModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + environmentValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + instanceTypeModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_batch_job( + name, + attempts, + size, + parameterKey, + parameterValue, + command, + environmentName, + environmentValue, + instanceType, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_batch_job_queue.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.batch_target.0.job_definition", "aws_batch_job_definition.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.job_name", name), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.retry_strategy.0.attempts", fmt.Sprintf("%d", attempts)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.array_properties.0.size", fmt.Sprintf("%d", size)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.parameters.0.key", parameterKey), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.parameters.0.value", parameterValue), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.depends_on.0.job_id", name), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.depends_on.0.type", "SEQUENTIAL"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.command.0", command), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.environment.0.name", environmentName), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.environment.0.value", environmentValue), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.instance_type", instanceType), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.resource_requirements.0.type", "VCPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.resource_requirements.0.value", "4"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_batch_job( + name, + attemptsModified, + sizeModified, + parameterKeyModified, + parameterValueModified, + commandModified, + environmentNameModified, + environmentValueModified, + instanceTypeModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_batch_job_queue.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.batch_target.0.job_definition", "aws_batch_job_definition.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.job_name", name), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.retry_strategy.0.attempts", fmt.Sprintf("%d", attemptsModified)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.array_properties.0.size", fmt.Sprintf("%d", sizeModified)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.parameters.0.key", parameterKeyModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.parameters.0.value", parameterValueModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.depends_on.0.job_id", name), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.depends_on.0.type", "SEQUENTIAL"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.command.0", commandModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.environment.0.name", environmentNameModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.environment.0.value", environmentValueModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.instance_type", instanceTypeModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.resource_requirements.0.type", "VCPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_target.0.container_overrides.0.resource_requirements.0.value", "4"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, 
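+				// ImportStateVerify re-reads the imported pipe and asserts parity with the saved state.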
+ ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_event_bridge_event_bus(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + detailType := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + source := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + time := "$.detail.time" + + detailTypeModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + sourceModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + timeModified := "$.detail.timestamp" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_event_bridge_event_bus( + name, + detailType, + source, + time, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_event_bus.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.detail_type", detailType), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.endpoint_id", ""), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.event_bridge_event_bus.0.resources.0", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.source", source), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.time", time), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_event_bridge_event_bus( + name, + detailTypeModified, + sourceModified, + timeModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_event_bus.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.detail_type", detailTypeModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.endpoint_id", ""), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.event_bridge_event_bus.0.resources.0", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.source", sourceModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.event_bridge_event_bus.0.time", timeModified), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_http(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe 
pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + headerKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + headerValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + headerKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + headerValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + queryStringValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_http( + name, + headerKey, + headerValue, + queryStringKey, + queryStringValue, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header.0.key", headerKey), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header.0.value", headerValue), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string.0.key", queryStringKey), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string.0.value", queryStringValue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_http( + name, + headerKeyModified, + headerValueModified, + queryStringKeyModified, + queryStringValueModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header.0.key", headerKeyModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header.0.value", headerValueModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string.0.key", queryStringKeyModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string.0.value", queryStringValueModified), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_lambda_function(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + invocationType := "REQUEST_RESPONSE" + invocationTypeModified := "FIRE_AND_FORGET" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_lambda_function( + name, + invocationType, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_lambda_function.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function.0.invocation_type", invocationType), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_lambda_function( + name, + invocationTypeModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_lambda_function.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function.0.invocation_type", invocationTypeModified), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_redshift(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + withEvent := false + withEventModified := true + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_redshift( + name, + withEvent, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_redshift_cluster.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.database", "redshiftdb"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.sqls.0", "SELECT * FROM table"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.statement_name", "NewStatement"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.database_user", "someUser"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.redshift_data.0.secret_manager_arn", "aws_secretsmanager_secret_version.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.with_event", fmt.Sprintf("%t", withEvent)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_redshift( + name, + withEventModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_redshift_cluster.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.database", "redshiftdb"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.sqls.0", "SELECT * FROM table"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.statement_name", "NewStatement"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.database_user", "someUser"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.redshift_data.0.secret_manager_arn", "aws_secretsmanager_secret_version.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data.0.with_event", fmt.Sprintf("%t", withEventModified)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_step_function(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + invocationType := "REQUEST_RESPONSE" + invocationTypeModified := "FIRE_AND_FORGET" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_step_function( + name, + invocationType, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sfn_state_machine.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function.0.invocation_type", invocationType), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_step_function( + name, + invocationTypeModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sfn_state_machine.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function.0.invocation_type", invocationTypeModified), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_source_sqs_target_ecs_task(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + enableEcsManagedTags := true + enableExecuteCommand := false + launchType := "FARGATE" + propagateTags := "TASK_DEFINITION" + referenceId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + taskCount := 1 + tagKey := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + tagValue := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + enableEcsManagedTagsModified := false + enableExecuteCommandModified := true + referenceIdModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + taskCountModified := 2 + tagKeyModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + tagValueModified := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_source_sqs_target_ecs_task( + name, + enableEcsManagedTags, + enableExecuteCommand, + launchType, + propagateTags, + referenceId, + taskCount, + tagKey, + tagValue, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_ecs_cluster.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.ecs_task.0.task_definition_arn", "aws_ecs_task_definition.task", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.enable_ecs_managed_tags", fmt.Sprintf("%t", enableEcsManagedTags)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.enable_execute_command", fmt.Sprintf("%t", enableExecuteCommand)), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.group"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.launch_type", launchType), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.assign_public_ip"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.propagate_tags", propagateTags), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.reference_id", referenceId), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.task_count", fmt.Sprintf("%d", taskCount)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.tags.0.key", tagKey), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.tags.0.value", tagValue), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.overrides"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_source_sqs_target_ecs_task( + name, + enableEcsManagedTagsModified, 
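+					// launchType and propagateTags are deliberately reused; the remaining ECS task values switch to the modified set.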
+ enableExecuteCommandModified, + launchType, + propagateTags, + referenceIdModified, + taskCountModified, + tagKeyModified, + tagValueModified, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_ecs_cluster.test", "id"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.ecs_task.0.task_definition_arn", "aws_ecs_task_definition.task", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.enable_ecs_managed_tags", fmt.Sprintf("%t", enableEcsManagedTagsModified)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.enable_execute_command", fmt.Sprintf("%t", enableExecuteCommandModified)), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.group"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.launch_type", launchType), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.assign_public_ip"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.network_configuration.0.aws_vpc_configuration.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.propagate_tags", propagateTags), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.reference_id", referenceIdModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.task_count", fmt.Sprintf("%d", taskCountModified)), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.tags.0.key", tagKeyModified), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task.0.tags.0.value", tagValueModified), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.ecs_task.0.overrides"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var pipe pipes.DescribePipeOutput + name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.first"), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.first"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.second"), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, 
&pipe), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.second"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_basic(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.input_template"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_pipes_pipe" { + continue + } + + _, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return create.Error(names.Pipes, create.ErrActionCheckingDestroyed, tfpipes.ResNamePipe, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckPipeExists(ctx context.Context, name string, pipe *pipes.DescribePipeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.Pipes, create.ErrActionCheckingExistence, tfpipes.ResNamePipe, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() + + output, err := tfpipes.FindPipeByName(ctx, conn, rs.Primary.ID) + if err != nil { + return err + } + + *pipe = *output + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient() + + input := &pipes.ListPipesInput{} + _, err := conn.ListPipes(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +const testAccPipeConfig_base = ` +data "aws_caller_identity" "main" {} +data "aws_partition" "main" {} + +resource "aws_iam_role" "test" { + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = { + Effect = "Allow" + Action = "sts:AssumeRole" + Principal = { + Service = "pipes.${data.aws_partition.main.dns_suffix}" + } + Condition = { + StringEquals = { + "aws:SourceAccount" = data.aws_caller_identity.main.account_id + } + } + } + }) +} +` + +const testAccPipeConfig_base_sqsSource = ` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ], + Resource = [ + aws_sqs_queue.source.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "source" {} +` + +const testAccPipeConfig_base_sqsTarget = ` +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:SendMessage", + ], + Resource = [ + aws_sqs_queue.target.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "target" {} +` + +const testAccPipeConfig_base_deadletter = ` +resource "aws_iam_role_policy" "deadletter" { + role = 
aws_iam_role.test.id
+  policy = jsonencode({
+    Version = "2012-10-17"
+    Statement = [
+      {
+        Effect = "Allow"
+        Action = [
+          "sqs:*"
+        ],
+        Resource = [
+          aws_sqs_queue.deadletter.arn
+        ]
+      },
+    ]
+  })
+}
+
+resource "aws_sqs_queue" "deadletter" {}`
+
+func testAccPipeConfig_base_kafka(name string) string {
+	return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(name, 2), fmt.Sprintf(`
+resource "aws_iam_role_policy" "test" {
+  role = aws_iam_role.test.name
+
+  policy = <<EOF

+		if value, ok := param["servers"]; ok && value.(*schema.Set).Len() > 0 {
+			parameters.AdditionalBootstrapServers = flex.ExpandStringValueSet(value.(*schema.Set))
+		}
+
+		if val, ok := param["credentials"]; ok {
+			credentialsConfig := val.([]interface{})
+			if len(credentialsConfig) != 0 {
+				// The credentials block behaves like a union: whichever auth key
+				// is present selects the corresponding SDK member type.
+				for _, cc := range credentialsConfig {
+					credentialsParam := cc.(map[string]interface{})
+					if _, ok := credentialsParam["basic_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth
+						credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["client_certificate_tls_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth
+						credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["sasl_scram_512_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth
+						credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["sasl_scram_256_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth
+						credentialsParameters.Value = expandStringValue("sasl_scram_256_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+				}
+			}
+		}
+
+		if val, ok := param["vpc"]; ok {
+			vpcConfig := val.([]interface{})
+			if len(vpcConfig) != 0 {
+				var vpcParameters types.SelfManagedKafkaAccessConfigurationVpc
+				for _, vc := range vpcConfig {
+					vpcParam := vc.(map[string]interface{})
+					if value, ok := vpcParam["security_groups"]; ok && value.(*schema.Set).Len() > 0 {
+						vpcParameters.SecurityGroup = flex.ExpandStringValueSet(value.(*schema.Set))
+					}
+					if value, ok := vpcParam["subnets"]; ok && value.(*schema.Set).Len() > 0 {
+						vpcParameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set))
+					}
+				}
+				parameters.Vpc = &vpcParameters
+			}
+		}
+	}
+
+	return &parameters
+}
+
+func expandSourceSqsQueueParameters(config []interface{}) *types.PipeSourceSqsQueueParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.PipeSourceSqsQueueParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+	}
+
+	return &parameters
+}
+
+func expandSourceDeadLetterConfig(config []interface{}) *types.DeadLetterConfig {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.DeadLetterConfig
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.Arn = expandString("arn", param)
+	}
+
+	return &parameters
+}
+
+func expandSourceFilterCriteria(config []interface{}) *types.FilterCriteria {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.FilterCriteria
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		if val, ok := param["filter"]; ok {
+			filtersConfig := val.([]interface{})
+			var filters []types.Filter
+			for _, f := range filtersConfig {
+				filterParam := f.(map[string]interface{})
+				pattern := expandString("pattern", filterParam)
+				if pattern != nil {
+					filters = append(filters, types.Filter{
+						Pattern: pattern,
+					})
+				}
+			}
+			if len(filters) > 0 {
+				parameters.Filters = filters
+			}
+		}
+	}
+
+	return &parameters
+}
+
+// flattenSourceParameters maps the API's PipeSourceParameters back onto the
+// resource's nested source_parameters configuration blocks.
+func flattenSourceParameters(sourceParameters *types.PipeSourceParameters) []map[string]interface{} {
+	config := make(map[string]interface{})
+
+	if sourceParameters.ActiveMQBrokerParameters != nil {
+		config["active_mq_broker"] = flattenSourceActiveMQBrokerParameters(sourceParameters.ActiveMQBrokerParameters)
+	}
+
+	if sourceParameters.DynamoDBStreamParameters != nil {
+		config["dynamo_db_stream"] = flattenSourceDynamoDBStreamParameters(sourceParameters.DynamoDBStreamParameters)
+	}
+
+	if sourceParameters.KinesisStreamParameters != nil {
+		config["kinesis_stream"] = flattenSourceKinesisStreamParameters(sourceParameters.KinesisStreamParameters)
+	}
+
+	if sourceParameters.ManagedStreamingKafkaParameters != nil {
+		config["managed_streaming_kafka"] = flattenSourceManagedStreamingKafkaParameters(sourceParameters.ManagedStreamingKafkaParameters)
+	}
+
+	if sourceParameters.RabbitMQBrokerParameters != nil {
+		config["rabbit_mq_broker"] = flattenSourceRabbitMQBrokerParameters(sourceParameters.RabbitMQBrokerParameters)
+	}
+
+	if sourceParameters.SelfManagedKafkaParameters != nil {
+		config["self_managed_kafka"] = flattenSourceSelfManagedKafkaParameters(sourceParameters.SelfManagedKafkaParameters)
+	}
+
+	if sourceParameters.SqsQueueParameters != nil {
+		config["sqs_queue"] = flattenSourceSqsQueueParameters(sourceParameters.SqsQueueParameters)
+	}
+
+	if sourceParameters.FilterCriteria != nil {
+		criteria := flattenSourceFilterCriteria(sourceParameters.FilterCriteria)
+		if len(criteria) > 0 {
+			config["filter_criteria"] = criteria
+		}
+	}
+
+	if len(config) == 0 {
+		return nil
+	}
+
+	result := []map[string]interface{}{config}
+	return result
+}
+
+func flattenSourceActiveMQBrokerParameters(parameters *types.PipeSourceActiveMQBrokerParameters) []map[string]interface{} {
+	config := make(map[string]interface{})
+
+	if parameters.BatchSize != nil {
+		config["batch_size"] = aws.ToInt32(parameters.BatchSize)
+	}
+	if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 {
+		config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds)
+	}
+	if parameters.QueueName != nil {
+		config["queue"] = aws.ToString(parameters.QueueName)
+	}
+	if parameters.Credentials != nil {
+		credentialsConfig := make(map[string]interface{})
+		switch v := parameters.Credentials.(type) {
+		case *types.MQBrokerAccessCredentialsMemberBasicAuth:
+			credentialsConfig["basic_auth"] = v.Value
+		}
+		config["credentials"] = []map[string]interface{}{credentialsConfig}
+	}
+
+	result := []map[string]interface{}{config}
+	return result
+}
+
+func flattenSourceDynamoDBStreamParameters(parameters *types.PipeSourceDynamoDBStreamParameters) []map[string]interface{} {
+	config := make(map[string]interface{})
+
+	if parameters.BatchSize != nil {
+		config["batch_size"] = aws.ToInt32(parameters.BatchSize)
+	}
+	if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 {
+		
config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) + } + if parameters.MaximumRecordAgeInSeconds != nil { + config["maximum_record_age_in_seconds"] = aws.ToInt32(parameters.MaximumRecordAgeInSeconds) + } + if parameters.ParallelizationFactor != nil { + config["parallelization_factor"] = aws.ToInt32(parameters.ParallelizationFactor) + } + if parameters.MaximumRetryAttempts != nil { + config["maximum_retry_attempts"] = aws.ToInt32(parameters.MaximumRetryAttempts) + } + if parameters.StartingPosition != "" { + config["starting_position"] = parameters.StartingPosition + } + if parameters.OnPartialBatchItemFailure != "" { + config["on_partial_batch_item_failure"] = parameters.OnPartialBatchItemFailure + } + if parameters.DeadLetterConfig != nil { + config["dead_letter_config"] = flattenSourceDeadLetterConfig(parameters.DeadLetterConfig) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenSourceKinesisStreamParameters(parameters *types.PipeSourceKinesisStreamParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.BatchSize != nil { + config["batch_size"] = aws.ToInt32(parameters.BatchSize) + } + if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { + config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) + } + if parameters.MaximumRecordAgeInSeconds != nil { + config["maximum_record_age_in_seconds"] = aws.ToInt32(parameters.MaximumRecordAgeInSeconds) + } + if parameters.ParallelizationFactor != nil { + config["parallelization_factor"] = aws.ToInt32(parameters.ParallelizationFactor) + } + if parameters.MaximumRetryAttempts != nil { + config["maximum_retry_attempts"] = aws.ToInt32(parameters.MaximumRetryAttempts) + } + if parameters.StartingPosition != "" { + config["starting_position"] = parameters.StartingPosition + } + if parameters.OnPartialBatchItemFailure != "" { + config["on_partial_batch_item_failure"] = parameters.OnPartialBatchItemFailure + } + if parameters.StartingPositionTimestamp != nil { + config["starting_position_timestamp"] = aws.ToTime(parameters.StartingPositionTimestamp).Format(time.RFC3339) + } + if parameters.DeadLetterConfig != nil { + config["dead_letter_config"] = flattenSourceDeadLetterConfig(parameters.DeadLetterConfig) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenSourceManagedStreamingKafkaParameters(parameters *types.PipeSourceManagedStreamingKafkaParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.BatchSize != nil { + config["batch_size"] = aws.ToInt32(parameters.BatchSize) + } + if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { + config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) + } + if parameters.ConsumerGroupID != nil { + config["consumer_group_id"] = aws.ToString(parameters.ConsumerGroupID) + } + if parameters.StartingPosition != "" { + config["starting_position"] = parameters.StartingPosition + } + if parameters.TopicName != nil { + config["topic"] = aws.ToString(parameters.TopicName) + } + if parameters.Credentials != nil { + credentialsConfig := make(map[string]interface{}) + switch v := parameters.Credentials.(type) { + case *types.MSKAccessCredentialsMemberClientCertificateTlsAuth: + 
credentialsConfig["client_certificate_tls_auth"] = v.Value + case *types.MSKAccessCredentialsMemberSaslScram512Auth: + credentialsConfig["sasl_scram_512_auth"] = v.Value + } + config["credentials"] = []map[string]interface{}{credentialsConfig} + } + + result := []map[string]interface{}{config} + return result +} + +func flattenSourceRabbitMQBrokerParameters(parameters *types.PipeSourceRabbitMQBrokerParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.BatchSize != nil { + config["batch_size"] = aws.ToInt32(parameters.BatchSize) + } + if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { + config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) + } + if parameters.QueueName != nil { + config["queue"] = aws.ToString(parameters.QueueName) + } + if parameters.VirtualHost != nil { + config["virtual_host"] = aws.ToString(parameters.VirtualHost) + } + if parameters.Credentials != nil { + credentialsConfig := make(map[string]interface{}) + switch v := parameters.Credentials.(type) { + case *types.MQBrokerAccessCredentialsMemberBasicAuth: + credentialsConfig["basic_auth"] = v.Value + } + config["credentials"] = []map[string]interface{}{credentialsConfig} + } + + result := []map[string]interface{}{config} + return result +} + +func flattenSourceSelfManagedKafkaParameters(parameters *types.PipeSourceSelfManagedKafkaParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.BatchSize != nil { + config["batch_size"] = aws.ToInt32(parameters.BatchSize) + } + if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { + config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) + } + if parameters.ConsumerGroupID != nil { + config["consumer_group_id"] = aws.ToString(parameters.ConsumerGroupID) + } + if parameters.StartingPosition != "" { + config["starting_position"] = parameters.StartingPosition + } + if parameters.TopicName != nil { + config["topic"] = aws.ToString(parameters.TopicName) + } + if parameters.AdditionalBootstrapServers != nil { + config["servers"] = flex.FlattenStringValueSet(parameters.AdditionalBootstrapServers) + } + if parameters.ServerRootCaCertificate != nil { + config["server_root_ca_certificate"] = aws.ToString(parameters.ServerRootCaCertificate) + } + + if parameters.Credentials != nil { + credentialsConfig := make(map[string]interface{}) + switch v := parameters.Credentials.(type) { + case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth: + credentialsConfig["basic_auth"] = v.Value + case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth: + credentialsConfig["client_certificate_tls_auth"] = v.Value + case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth: + credentialsConfig["sasl_scram_256_auth"] = v.Value + case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth: + credentialsConfig["sasl_scram_512_auth"] = v.Value + } + config["credentials"] = []map[string]interface{}{credentialsConfig} + } + if parameters.Vpc != nil { + vpcConfig := make(map[string]interface{}) + vpcConfig["security_groups"] = flex.FlattenStringValueSet(parameters.Vpc.SecurityGroup) + vpcConfig["subnets"] = flex.FlattenStringValueSet(parameters.Vpc.Subnets) + config["vpc"] = []map[string]interface{}{vpcConfig} + } + + 
result := []map[string]interface{}{config}
+	return result
+}
+
+func flattenSourceSqsQueueParameters(parameters *types.PipeSourceSqsQueueParameters) []map[string]interface{} {
+	config := make(map[string]interface{})
+
+	if parameters.BatchSize != nil {
+		config["batch_size"] = aws.ToInt32(parameters.BatchSize)
+	}
+	if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 {
+		config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds)
+	}
+
+	result := []map[string]interface{}{config}
+	return result
+}
+
+func flattenSourceFilterCriteria(parameters *types.FilterCriteria) []map[string]interface{} {
+	config := make(map[string]interface{})
+
+	if len(parameters.Filters) != 0 {
+		var filters []map[string]interface{}
+		for _, filter := range parameters.Filters {
+			pattern := make(map[string]interface{})
+			pattern["pattern"] = aws.ToString(filter.Pattern)
+			filters = append(filters, pattern)
+		}
+		if len(filters) != 0 {
+			config["filter"] = filters
+		}
+	}
+
+	result := []map[string]interface{}{config}
+	return result
+}
+
+func flattenSourceDeadLetterConfig(parameters *types.DeadLetterConfig) []map[string]interface{} {
+	if parameters == nil {
+		return nil
+	}
+
+	config := make(map[string]interface{})
+	if parameters.Arn != nil {
+		config["arn"] = aws.ToString(parameters.Arn)
+	}
+
+	result := []map[string]interface{}{config}
+	return result
+}
+
+func expandSourceUpdateParameters(config []interface{}) *types.UpdatePipeSourceParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceParameters
+	for _, c := range config {
+		param, ok := c.(map[string]interface{})
+		if !ok {
+			return nil
+		}
+
+		if val, ok := param["active_mq_broker"]; ok {
+			parameters.ActiveMQBrokerParameters = expandSourceUpdateActiveMQBrokerParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["dynamo_db_stream"]; ok {
+			parameters.DynamoDBStreamParameters = expandSourceUpdateDynamoDBStreamParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["kinesis_stream"]; ok {
+			parameters.KinesisStreamParameters = expandSourceUpdateKinesisStreamParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["managed_streaming_kafka"]; ok {
+			parameters.ManagedStreamingKafkaParameters = expandSourceUpdateManagedStreamingKafkaParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["rabbit_mq_broker"]; ok {
+			parameters.RabbitMQBrokerParameters = expandSourceUpdateRabbitMQBrokerParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["self_managed_kafka"]; ok {
+			parameters.SelfManagedKafkaParameters = expandSourceUpdateSelfManagedKafkaParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["sqs_queue"]; ok {
+			parameters.SqsQueueParameters = expandSourceUpdateSqsQueueParameters(val.([]interface{}))
+		}
+
+		if val, ok := param["filter_criteria"]; ok {
+			parameters.FilterCriteria = expandSourceFilterCriteria(val.([]interface{}))
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateActiveMQBrokerParameters(config []interface{}) *types.UpdatePipeSourceActiveMQBrokerParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceActiveMQBrokerParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+		if val, ok := param["credentials"]; ok {
+			credentialsConfig := val.([]interface{})
+			if len(credentialsConfig) != 0 {
+				var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth
+				for _, cc := range credentialsConfig {
+					credentialsParam := cc.(map[string]interface{})
+					credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam)
+				}
+				parameters.Credentials = &credentialsParameters
+			}
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateDynamoDBStreamParameters(config []interface{}) *types.UpdatePipeSourceDynamoDBStreamParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceDynamoDBStreamParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+		parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param)
+		parameters.ParallelizationFactor = expandInt32("parallelization_factor", param)
+		parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param)
+		onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param)
+		if onPartialBatchItemFailure != "" {
+			parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure)
+		}
+		if val, ok := param["dead_letter_config"]; ok {
+			parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{}))
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateKinesisStreamParameters(config []interface{}) *types.UpdatePipeSourceKinesisStreamParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceKinesisStreamParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+		parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param)
+		parameters.ParallelizationFactor = expandInt32("parallelization_factor", param)
+		parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param)
+
+		onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param)
+		if onPartialBatchItemFailure != "" {
+			parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure)
+		}
+		if val, ok := param["dead_letter_config"]; ok {
+			parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{}))
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateManagedStreamingKafkaParameters(config []interface{}) *types.UpdatePipeSourceManagedStreamingKafkaParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceManagedStreamingKafkaParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+
+		if val, ok := param["credentials"]; ok {
+			credentialsConfig := val.([]interface{})
+			if len(credentialsConfig) != 0 {
+				for _, cc := range credentialsConfig {
+					credentialsParam := cc.(map[string]interface{})
+					if _, ok := credentialsParam["client_certificate_tls_auth"]; ok {
+						var credentialsParameters types.MSKAccessCredentialsMemberClientCertificateTlsAuth
+						credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["sasl_scram_512_auth"]; ok {
+						var credentialsParameters types.MSKAccessCredentialsMemberSaslScram512Auth
+						credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+				}
+			}
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateRabbitMQBrokerParameters(config []interface{}) *types.UpdatePipeSourceRabbitMQBrokerParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceRabbitMQBrokerParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+
+		if val, ok := param["credentials"]; ok {
+			credentialsConfig := val.([]interface{})
+			if len(credentialsConfig) != 0 {
+				var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth
+				for _, cc := range credentialsConfig {
+					credentialsParam := cc.(map[string]interface{})
+					credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam)
+				}
+				parameters.Credentials = &credentialsParameters
+			}
+		}
+	}
+	return &parameters
+}
+
+func expandSourceUpdateSelfManagedKafkaParameters(config []interface{}) *types.UpdatePipeSourceSelfManagedKafkaParameters {
+	if len(config) == 0 {
+		return nil
+	}
+	var parameters types.UpdatePipeSourceSelfManagedKafkaParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+		parameters.ServerRootCaCertificate = expandString("server_root_ca_certificate", param)
+
+		if val, ok := param["credentials"]; ok {
+			credentialsConfig := val.([]interface{})
+			if len(credentialsConfig) != 0 {
+				for _, cc := range credentialsConfig {
+					credentialsParam := cc.(map[string]interface{})
+					if _, ok := credentialsParam["basic_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth
+						credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["client_certificate_tls_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth
+						credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["sasl_scram_512_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth
+						credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+					if _, ok := credentialsParam["sasl_scram_256_auth"]; ok {
+						var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth
+						credentialsParameters.Value = expandStringValue("sasl_scram_256_auth", credentialsParam)
+						parameters.Credentials = &credentialsParameters
+					}
+				}
+			}
+		}
+
+		if val, ok := param["vpc"]; ok {
+			vpcConfig := val.([]interface{})
+			if len(vpcConfig) != 0 {
+				var vpcParameters types.SelfManagedKafkaAccessConfigurationVpc
+				for _, vc := range vpcConfig {
+					vpcParam := vc.(map[string]interface{})
+					if value, ok := vpcParam["security_groups"]; ok && value.(*schema.Set).Len() > 0 {
+						vpcParameters.SecurityGroup = flex.ExpandStringValueSet(value.(*schema.Set))
+					}
+					if value, ok := vpcParam["subnets"]; ok && value.(*schema.Set).Len() > 0 {
+						vpcParameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set))
+					}
+				}
+				parameters.Vpc = &vpcParameters
+			}
+		}
+	}
+
+	return &parameters
+}
+
+func expandSourceUpdateSqsQueueParameters(config []interface{}) *types.UpdatePipeSourceSqsQueueParameters {
+	if len(config) == 0 {
+		return nil
+	}
+
+	var parameters types.UpdatePipeSourceSqsQueueParameters
+	for _, c := range config {
+		param := c.(map[string]interface{})
+		parameters.BatchSize = expandInt32("batch_size", param)
+		parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param)
+	}
+
+	return &parameters
+}
diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go
new file mode 100644
index 00000000000..611e859364a
--- /dev/null
+++ b/internal/service/pipes/source_parameters_test.go
@@ -0,0 +1,1500 @@
+package pipes
+
+import (
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_expandSourceParameters(t *testing.T) {
+	tests := map[string]struct {
+		config   map[string]interface{}
+		expected *types.PipeSourceParameters
+	}{
+		"active_mq_broker config": {
+			config: map[string]interface{}{
+				"active_mq_broker": []interface{}{
+					map[string]interface{}{
+						"batch_size":                         int32(10),
+						"maximum_batching_window_in_seconds": int32(60),
+						"queue":                              "test",
+						"credentials": []interface{}{
+							map[string]interface{}{
+								"basic_auth": "arn:secrets",
+							},
+						},
+					},
+				},
+				"dynamo_db_stream":        []interface{}{},
+				"kinesis_stream":          []interface{}{},
+				"managed_streaming_kafka": []interface{}{},
+				"rabbit_mq_broker":        []interface{}{},
+				"self_managed_kafka":      []interface{}{},
+				"sqs_queue":               []interface{}{},
+				"filter_criteria":         []interface{}{},
+			},
+			expected: &types.PipeSourceParameters{
+				ActiveMQBrokerParameters: &types.PipeSourceActiveMQBrokerParameters{
+					BatchSize:                      aws.Int32(10),
+					MaximumBatchingWindowInSeconds: aws.Int32(60),
+					QueueName:                      aws.String("test"),
+					Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{
+						Value: "arn:secrets",
+					},
+				},
+			},
+		},
+		"dynamo_db_stream config": {
+			config: map[string]interface{}{
+				"active_mq_broker": []interface{}{},
+				"dynamo_db_stream": []interface{}{
+					map[string]interface{}{
+						"starting_position":                  "LATEST",
+						"batch_size":                         int32(10),
+						"maximum_batching_window_in_seconds": int32(60),
+						"maximum_record_age_in_seconds":      int32(120),
+						"maximum_retry_attempts":             int32(3),
+						"parallelization_factor":             int32(1),
+						"on_partial_batch_item_failure":      "AUTOMATIC_BISECT",
+						"dead_letter_config": []interface{}{
+							map[string]interface{}{
+								"arn": "arn:queue",
+							},
+						},
+					},
+				},
+				"kinesis_stream":          []interface{}{},
+				"managed_streaming_kafka": []interface{}{},
+				"rabbit_mq_broker":        []interface{}{},
+				"self_managed_kafka":      []interface{}{},
+				"sqs_queue":               []interface{}{},
+				"filter_criteria":         []interface{}{},
+			},
+			expected: &types.PipeSourceParameters{
+				DynamoDBStreamParameters: &types.PipeSourceDynamoDBStreamParameters{
+					StartingPosition:               "LATEST",
+					BatchSize:                      aws.Int32(10),
+					MaximumBatchingWindowInSeconds: aws.Int32(60),
+					MaximumRecordAgeInSeconds:      aws.Int32(120),
+					MaximumRetryAttempts:           
aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "kinesis_stream config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{ + map[string]interface{}{ + "starting_position": "AT_TIMESTAMP", + "starting_position_timestamp": "2020-01-01T00:00:00Z", + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "maximum_record_age_in_seconds": int32(120), + "maximum_retry_attempts": int32(3), + "parallelization_factor": int32(1), + "on_partial_batch_item_failure": "AUTOMATIC_BISECT", + "dead_letter_config": []interface{}{ + map[string]interface{}{ + "arn": "arn:queue", + }, + }, + }, + }, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + KinesisStreamParameters: &types.PipeSourceKinesisStreamParameters{ + StartingPosition: "AT_TIMESTAMP", + StartingPositionTimestamp: aws.Time(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + MaximumRecordAgeInSeconds: aws.Int32(120), + MaximumRetryAttempts: aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "managed_streaming_kafka config with client_certificate_tls_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "credentials": []interface{}{ + map[string]interface{}{ + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + Credentials: &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "managed_streaming_kafka config with sasl_scram_512_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: 
&types.PipeSourceParameters{ + ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "queue": "test", + "virtual_host": "hosting", + "credentials": []interface{}{ + map[string]interface{}{ + "basic_auth": "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + RabbitMQBrokerParameters: &types.PipeSourceRabbitMQBrokerParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + QueueName: aws.String("test"), + VirtualHost: aws.String("hosting"), + Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with basic_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "basic_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with client_certificate_tls_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + 
map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_512_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_256_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + 
"rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": "LATEST", + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_256_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "sqs_queue config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + }, + }, + "filter_criteria": []interface{}{}, + }, + expected: &types.PipeSourceParameters{ + SqsQueueParameters: &types.PipeSourceSqsQueueParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + }, + }, + }, + "filter_criteria config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{ + map[string]interface{}{ + "filter": []interface{}{ + map[string]interface{}{ + "pattern": "1", + }, + map[string]interface{}{ + "pattern": "2", + }, + }, + }, + }, + }, + expected: &types.PipeSourceParameters{ + FilterCriteria: &types.FilterCriteria{ + Filters: []types.Filter{ + { + Pattern: aws.String("1"), + }, + { + Pattern: aws.String("2"), + }, + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := expandSourceParameters([]interface{}{tt.config}) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func Test_flattenSourceParameters(t *testing.T) { + tests := map[string]struct { + config *types.PipeSourceParameters + expected []map[string]interface{} + }{ + "active_mq_broker config": { + expected: []map[string]interface{}{ + { + "active_mq_broker": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "queue": "test", + "credentials": 
[]map[string]interface{}{ + { + "basic_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + ActiveMQBrokerParameters: &types.PipeSourceActiveMQBrokerParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + QueueName: aws.String("test"), + Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "dynamo_db_stream config": { + expected: []map[string]interface{}{ + { + "dynamo_db_stream": []map[string]interface{}{ + { + "starting_position": types.DynamoDBStreamStartPositionLatest, + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "maximum_record_age_in_seconds": int32(120), + "maximum_retry_attempts": int32(3), + "parallelization_factor": int32(1), + "on_partial_batch_item_failure": types.OnPartialBatchItemFailureStreamsAutomaticBisect, + "dead_letter_config": []map[string]interface{}{ + { + "arn": "arn:queue", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + DynamoDBStreamParameters: &types.PipeSourceDynamoDBStreamParameters{ + StartingPosition: "LATEST", + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + MaximumRecordAgeInSeconds: aws.Int32(120), + MaximumRetryAttempts: aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "kinesis_stream config": { + expected: []map[string]interface{}{ + { + "kinesis_stream": []map[string]interface{}{ + { + "starting_position": types.KinesisStreamStartPositionAtTimestamp, + "starting_position_timestamp": "2020-01-01T00:00:00Z", + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "maximum_record_age_in_seconds": int32(120), + "maximum_retry_attempts": int32(3), + "parallelization_factor": int32(1), + "on_partial_batch_item_failure": types.OnPartialBatchItemFailureStreamsAutomaticBisect, + "dead_letter_config": []map[string]interface{}{ + { + "arn": "arn:queue", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + KinesisStreamParameters: &types.PipeSourceKinesisStreamParameters{ + StartingPosition: "AT_TIMESTAMP", + StartingPositionTimestamp: aws.Time(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + MaximumRecordAgeInSeconds: aws.Int32(120), + MaximumRetryAttempts: aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "managed_streaming_kafka config with client_certificate_tls_auth authentication": { + expected: []map[string]interface{}{ + { + "managed_streaming_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": types.MSKStartPositionLatest, + "credentials": []map[string]interface{}{ + { + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + Credentials: 
&types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "managed_streaming_kafka config with sasl_scram_512_auth authentication": { + expected: []map[string]interface{}{ + { + "managed_streaming_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": types.MSKStartPositionLatest, + "credentials": []map[string]interface{}{ + { + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker config": { + expected: []map[string]interface{}{ + { + "rabbit_mq_broker": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "queue": "test", + "virtual_host": "hosting", + "credentials": []map[string]interface{}{ + { + "basic_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + RabbitMQBrokerParameters: &types.PipeSourceRabbitMQBrokerParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + QueueName: aws.String("test"), + VirtualHost: aws.String("hosting"), + Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with basic_auth authentication": { + expected: []map[string]interface{}{ + { + "self_managed_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": types.SelfManagedKafkaStartPositionLatest, + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []map[string]interface{}{ + { + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []map[string]interface{}{ + { + "basic_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with client_certificate_tls_auth authentication": { + expected: []map[string]interface{}{ + { + "self_managed_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + 
"starting_position": types.SelfManagedKafkaStartPositionLatest, + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []map[string]interface{}{ + { + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []map[string]interface{}{ + { + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_512_auth authentication": { + expected: []map[string]interface{}{ + { + "self_managed_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": types.SelfManagedKafkaStartPositionLatest, + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []map[string]interface{}{ + { + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []map[string]interface{}{ + { + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_256_auth authentication": { + expected: []map[string]interface{}{ + { + "self_managed_kafka": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "topic": "test", + "consumer_group_id": "group", + "starting_position": types.SelfManagedKafkaStartPositionLatest, + "server_root_ca_certificate": "arn:ca:cert", + "servers": schema.NewSet(schema.HashString, []interface{}{ + "server1", + "server2", + }), + "vpc": []map[string]interface{}{ + { + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + 
"credentials": []map[string]interface{}{ + { + "sasl_scram_256_auth": "arn:secrets", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + TopicName: aws.String("test"), + ConsumerGroupID: aws.String("group"), + StartingPosition: "LATEST", + ServerRootCaCertificate: aws.String("arn:ca:cert"), + AdditionalBootstrapServers: []string{"server2", "server1"}, + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "sqs_queue config": { + expected: []map[string]interface{}{ + { + "sqs_queue": []map[string]interface{}{ + { + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + SqsQueueParameters: &types.PipeSourceSqsQueueParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + }, + }, + }, + "filter_criteria config": { + expected: []map[string]interface{}{ + { + "filter_criteria": []map[string]interface{}{ + { + "filter": []map[string]interface{}{ + { + "pattern": "1", + }, + { + "pattern": "2", + }, + }, + }, + }, + }, + }, + config: &types.PipeSourceParameters{ + FilterCriteria: &types.FilterCriteria{ + Filters: []types.Filter{ + { + Pattern: aws.String("1"), + }, + { + Pattern: aws.String("2"), + }, + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := flattenSourceParameters(tt.config) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func Test_expandSourceUpdateParameters(t *testing.T) { + tests := map[string]struct { + config map[string]interface{} + expected *types.UpdatePipeSourceParameters + }{ + "active_mq_broker config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "credentials": []interface{}{ + map[string]interface{}{ + "basic_auth": "arn:secrets", + }, + }, + }, + }, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + ActiveMQBrokerParameters: &types.UpdatePipeSourceActiveMQBrokerParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "dynamo_db_stream config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "maximum_record_age_in_seconds": int32(120), + "maximum_retry_attempts": int32(3), + "parallelization_factor": int32(1), + "on_partial_batch_item_failure": "AUTOMATIC_BISECT", + "dead_letter_config": []interface{}{ + map[string]interface{}{ + "arn": "arn:queue", + }, + }, + }, + }, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + 
"self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + DynamoDBStreamParameters: &types.UpdatePipeSourceDynamoDBStreamParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + MaximumRecordAgeInSeconds: aws.Int32(120), + MaximumRetryAttempts: aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "kinesis_stream config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "maximum_record_age_in_seconds": int32(120), + "maximum_retry_attempts": int32(3), + "parallelization_factor": int32(1), + "on_partial_batch_item_failure": "AUTOMATIC_BISECT", + "dead_letter_config": []interface{}{ + map[string]interface{}{ + "arn": "arn:queue", + }, + }, + }, + }, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + KinesisStreamParameters: &types.UpdatePipeSourceKinesisStreamParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + MaximumRecordAgeInSeconds: aws.Int32(120), + MaximumRetryAttempts: aws.Int32(3), + ParallelizationFactor: aws.Int32(1), + OnPartialBatchItemFailure: "AUTOMATIC_BISECT", + DeadLetterConfig: &types.DeadLetterConfig{ + Arn: aws.String("arn:queue"), + }, + }, + }, + }, + "managed_streaming_kafka config with client_certificate_tls_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "credentials": []interface{}{ + map[string]interface{}{ + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + ManagedStreamingKafkaParameters: &types.UpdatePipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + Credentials: &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "managed_streaming_kafka config with sasl_scram_512_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + ManagedStreamingKafkaParameters: 
&types.UpdatePipeSourceManagedStreamingKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "rabbit_mq_broker config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "credentials": []interface{}{ + map[string]interface{}{ + "basic_auth": "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + RabbitMQBrokerParameters: &types.UpdatePipeSourceRabbitMQBrokerParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with basic_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "server_root_ca_certificate": "arn:ca:cert", + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "basic_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + ServerRootCaCertificate: aws.String("arn:ca:cert"), + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with client_certificate_tls_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "server_root_ca_certificate": "arn:ca:cert", + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "client_certificate_tls_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + 
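+ // Set-typed attributes expand in schema.Set hash order, which is why "sg2" precedes "sg1" in the expected VPC security groups below.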
expected: &types.UpdatePipeSourceParameters{ + SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + ServerRootCaCertificate: aws.String("arn:ca:cert"), + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_512_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "server_root_ca_certificate": "arn:ca:cert", + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_512_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + ServerRootCaCertificate: aws.String("arn:ca:cert"), + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "self_managed_kafka config with sasl_scram_256_auth authentication": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + "server_root_ca_certificate": "arn:ca:cert", + "vpc": []interface{}{ + map[string]interface{}{ + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + "credentials": []interface{}{ + map[string]interface{}{ + "sasl_scram_256_auth": "arn:secrets", + }, + }, + }, + }, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + ServerRootCaCertificate: aws.String("arn:ca:cert"), + Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ + SecurityGroup: []string{"sg2", "sg1"}, + Subnets: []string{"subnet1", "subnet2"}, + }, + Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ + Value: "arn:secrets", + }, + }, + }, + }, + "sqs_queue config": { + config: map[string]interface{}{ + 
"active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{ + map[string]interface{}{ + "batch_size": int32(10), + "maximum_batching_window_in_seconds": int32(60), + }, + }, + "filter_criteria": []interface{}{}, + }, + expected: &types.UpdatePipeSourceParameters{ + SqsQueueParameters: &types.UpdatePipeSourceSqsQueueParameters{ + BatchSize: aws.Int32(10), + MaximumBatchingWindowInSeconds: aws.Int32(60), + }, + }, + }, + "filter_criteria config": { + config: map[string]interface{}{ + "active_mq_broker": []interface{}{}, + "dynamo_db_stream": []interface{}{}, + "kinesis_stream": []interface{}{}, + "managed_streaming_kafka": []interface{}{}, + "rabbit_mq_broker": []interface{}{}, + "self_managed_kafka": []interface{}{}, + "sqs_queue": []interface{}{}, + "filter_criteria": []interface{}{ + map[string]interface{}{ + "filter": []interface{}{ + map[string]interface{}{ + "pattern": "1", + }, + map[string]interface{}{ + "pattern": "2", + }, + }, + }, + }, + }, + expected: &types.UpdatePipeSourceParameters{ + FilterCriteria: &types.FilterCriteria{ + Filters: []types.Filter{ + { + Pattern: aws.String("1"), + }, + { + Pattern: aws.String("2"), + }, + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := expandSourceUpdateParameters([]interface{}{tt.config}) + + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/internal/service/pipes/sweep.go b/internal/service/pipes/sweep.go index e21ec0c1f53..eaf52f3319e 100644 --- a/internal/service/pipes/sweep.go +++ b/internal/service/pipes/sweep.go @@ -25,7 +25,6 @@ func init() { func sweepPipes(region string) error { client, err := sweep.SharedRegionalSweepClient(region) - if err != nil { return fmt.Errorf("getting client: %w", err) } @@ -38,7 +37,6 @@ func sweepPipes(region string) error { for paginator.HasMorePages() { page, err := paginator.NextPage(context.Background()) - if err != nil { errs = multierror.Append(errs, fmt.Errorf("listing Pipes for %s: %w", region, err)) break diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go new file mode 100644 index 00000000000..a1aa97c5e82 --- /dev/null +++ b/internal/service/pipes/target_parameters.go @@ -0,0 +1,2061 @@ +package pipes + +import ( + "regexp" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +var target_parameters_schema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_target": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + 
"target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_definition": { + Type: schema.TypeString, + Required: true, + }, + "job_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "retry_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attempts": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + }, + }, + }, + }, + "array_properties": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(2, 10000), + }, + }, + }, + }, + "parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "depends_on": { + Type: schema.TypeList, + Optional: true, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.BatchJobDependencyType](), + }, + }, + }, + }, + "container_overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + }, + "resource_requirements": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.BatchResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "cloudwatch_logs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_stream_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + "timestamp": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), + }, + }, + }, + }, + "ecs_task": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + 
"target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "task_definition_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "capacity_provider_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 6, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_provider": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "base": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100000), + Default: 0, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 1000), + Default: 0, + }, + }, + }, + }, + "enable_ecs_managed_tags": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "enable_execute_command": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "group": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "launch_type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.LaunchType](), + }, + "network_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_vpc_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assign_public_ip": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AssignPublicIp](), + }, + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + }, + "subnets": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 16, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + }, + }, + }, + }, + }, + }, + }, + "placement_constraints": { + Type: schema.TypeList, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementConstraintType](), + }, + }, + }, + }, + "placement_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementStrategyType](), + }, + }, + }, + }, + "platform_version": { + Type: schema.TypeString, + Optional: true, + }, + "propagate_tags": { + Type: schema.TypeString, + Optional: true, + 
ValidateDiagFunc: enum.Validate[types.PropagateTags](), + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "task_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeString, + Optional: true, + }, + "memory": { + Type: schema.TypeString, + Optional: true, + }, + "execution_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "task_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "inference_accelerator_overrides": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Optional: true, + }, + "device_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ecs_ephemeral_storage": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_in_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(21, 200), + }, + }, + }, + }, + "container_overrides": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeInt, + Optional: true, + }, + "memory": { + Type: schema.TypeInt, + Optional: true, + }, + "memory_reservation": { + Type: schema.TypeInt, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "environment_files": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsEnvironmentFileType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, + "resource_requirements": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "event_bridge_event_bus": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + 
"target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detail_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "endpoint_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 50), + validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9\-]+[\.][A-Za-z0-9\-]+$`), ""), + ), + }, + "resources": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, + }, + "source": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + ), + }, + "time": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), + }, + }, + }, + }, + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + }, + }, + }, + "path_parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "query_string": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + }, + }, + }, + }, + }, + }, + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), + }, + "kinesis_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "lambda_function": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + 
ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, + }, + }, + }, + "redshift_data": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "sqls": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 100000), + }, + }, + "database_user": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "secret_manager_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "statement_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "with_event": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "sage_maker_pipeline": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 200, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + }, + }, + }, + }, + }, + }, + "sqs_queue": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + 
"target_parameters.0.sage_maker_pipeline", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message_deduplication_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + "message_group_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + }, + }, + }, + "step_function": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, + }, + }, + }, + }, + }, +} + +func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetParameters + for _, c := range config { + param, ok := c.(map[string]interface{}) + if !ok { + return nil + } + + if val, ok := param["batch_target"]; ok { + parameters.BatchJobParameters = expandTargetBatchJobParameters(val.([]interface{})) + } + + if val, ok := param["cloudwatch_logs"]; ok { + parameters.CloudWatchLogsParameters = expandTargetCloudWatchLogsParameters(val.([]interface{})) + } + + if val, ok := param["ecs_task"]; ok { + parameters.EcsTaskParameters = expandTargetEcsTaskParameters(val.([]interface{})) + } + + if val, ok := param["event_bridge_event_bus"]; ok { + parameters.EventBridgeEventBusParameters = expandTargetEventBridgeEventBusParameters(val.([]interface{})) + } + + if val, ok := param["http_parameters"]; ok { + parameters.HttpParameters = expandTargetHTTPParameters(val.([]interface{})) + } + + if val, ok := param["input_template"].(string); ok && val != "" { + parameters.InputTemplate = aws.String(val) + } + + if val, ok := param["kinesis_stream"]; ok { + parameters.KinesisStreamParameters = expandTargetKinesisStreamParameters(val.([]interface{})) + } + + if val, ok := param["lambda_function"]; ok { + parameters.LambdaFunctionParameters = expandTargetLambdaFunctionParameters(val.([]interface{})) + } + + if val, ok := param["redshift_data"]; ok { + parameters.RedshiftDataParameters = expandTargetRedshiftDataParameters(val.([]interface{})) + } + + if val, ok := param["sage_maker_pipeline"]; ok { + parameters.SageMakerPipelineParameters = expandTargetSageMakerPipelineParameters(val.([]interface{})) + } + + if val, ok := param["sqs_queue"]; ok { + parameters.SqsQueueParameters = expandTargetSqsQueueParameters(val.([]interface{})) + } + + if val, ok := param["step_function"]; ok { + parameters.StepFunctionStateMachineParameters = expandTargetStepFunctionStateMachineParameters(val.([]interface{})) + } + } + return ¶meters +} + +func expandTargetBatchJobParameters(config []interface{}) *types.PipeTargetBatchJobParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetBatchJobParameters + for _, c := range config { + param := c.(map[string]interface{}) + + parameters.JobDefinition = expandString("job_definition", param) + 
parameters.JobName = expandString("job_name", param)
+ if val, ok := param["retry_strategy"]; ok {
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ if attempts, ok := valueParam["attempts"].(int32); ok {
+ parameters.RetryStrategy = &types.BatchRetryStrategy{
+ Attempts: attempts,
+ }
+ }
+ }
+ }
+ }
+ if val, ok := param["array_properties"]; ok {
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ if size, ok := valueParam["size"].(int32); ok {
+ parameters.ArrayProperties = &types.BatchArrayProperties{
+ Size: size,
+ }
+ }
+ }
+ }
+ }
+
+ if val, ok := param["parameters"]; ok {
+ batchTargetParameters := map[string]string{}
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ if key, ok := valueParam["key"].(string); ok && key != "" {
+ if value, ok := valueParam["value"].(string); ok && value != "" {
+ batchTargetParameters[key] = value
+ }
+ }
+ }
+ }
+ if len(batchTargetParameters) > 0 {
+ parameters.Parameters = batchTargetParameters
+ }
+ }
+
+ if val, ok := param["depends_on"]; ok {
+ var dependsOn []types.BatchJobDependency
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var dependency types.BatchJobDependency
+ dependency.JobId = expandString("job_id", valueParam)
+ dependencyType := expandStringValue("type", valueParam)
+ if dependencyType != "" {
+ dependency.Type = types.BatchJobDependencyType(dependencyType)
+ }
+ dependsOn = append(dependsOn, dependency)
+ }
+ }
+ if len(dependsOn) > 0 {
+ parameters.DependsOn = dependsOn
+ }
+ }
+
+ if val, ok := param["container_overrides"]; ok {
+ parameters.ContainerOverrides = expandTargetBatchContainerOverrides(val.([]interface{}))
+ }
+ }
+
+ return &parameters
+}
+
+func expandTargetBatchContainerOverrides(config []interface{}) *types.BatchContainerOverrides {
+ if len(config) == 0 {
+ return nil
+ }
+
+ var parameters types.BatchContainerOverrides
+ for _, c := range config {
+ param := c.(map[string]interface{})
+ if value, ok := param["command"]; ok && value.(*schema.Set).Len() > 0 {
+ parameters.Command = flex.ExpandStringValueSet(value.(*schema.Set))
+ }
+ parameters.InstanceType = expandString("instance_type", param)
+
+ if val, ok := param["environment"]; ok {
+ var environment []types.BatchEnvironmentVariable
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var env types.BatchEnvironmentVariable
+ env.Name = expandString("name", valueParam)
+ env.Value = expandString("value", valueParam)
+ environment = append(environment, env)
+ }
+ }
+ if len(environment) > 0 {
+ parameters.Environment = environment
+ }
+ }
+
+ if val, ok := param["resource_requirements"]; ok {
+ var resourceRequirements []types.BatchResourceRequirement
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var resourceRequirement types.BatchResourceRequirement
+ resourceRequirementType := expandStringValue("type", valueParam)
+ if resourceRequirementType != "" {
+ resourceRequirement.Type = types.BatchResourceRequirementType(resourceRequirementType)
+ }
+ resourceRequirement.Value = expandString("value", valueParam)
+ resourceRequirements = append(resourceRequirements, resourceRequirement)
+ }
+ }
+ if len(resourceRequirements) > 0 {
+ parameters.ResourceRequirements = 
resourceRequirements + } + } + } + + return ¶meters +} + +func expandTargetCloudWatchLogsParameters(config []interface{}) *types.PipeTargetCloudWatchLogsParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetCloudWatchLogsParameters + for _, c := range config { + param := c.(map[string]interface{}) + + parameters.LogStreamName = expandString("log_stream_name", param) + parameters.Timestamp = expandString("timestamp", param) + } + + return ¶meters +} + +func expandTargetEcsTaskParameters(config []interface{}) *types.PipeTargetEcsTaskParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetEcsTaskParameters + for _, c := range config { + param := c.(map[string]interface{}) + + parameters.TaskDefinitionArn = expandString("task_definition_arn", param) + parameters.EnableECSManagedTags = expandBool("enable_ecs_managed_tags", param) + parameters.EnableExecuteCommand = expandBool("enable_execute_command", param) + parameters.Group = expandString("group", param) + launchType := expandStringValue("launch_type", param) + if launchType != "" { + parameters.LaunchType = types.LaunchType(launchType) + } + parameters.PlatformVersion = expandString("platform_version", param) + propagateTags := expandStringValue("propagate_tags", param) + if propagateTags != "" { + parameters.PropagateTags = types.PropagateTags(propagateTags) + } + parameters.ReferenceId = expandString("reference_id", param) + parameters.TaskCount = expandInt32("task_count", param) + + if val, ok := param["capacity_provider_strategy"]; ok { + parameters.CapacityProviderStrategy = expandTargetCapacityProviderStrategy(val.([]interface{})) + } + if val, ok := param["network_configuration"]; ok { + parameters.NetworkConfiguration = expandTargetNetworkConfiguration(val.([]interface{})) + } + if val, ok := param["placement_constraints"]; ok { + parameters.PlacementConstraints = expandTargetPlacementConstraints(val.([]interface{})) + } + if val, ok := param["placement_strategy"]; ok { + parameters.PlacementStrategy = expandTargetPlacementStrategies(val.([]interface{})) + } + if val, ok := param["tags"]; ok { + parameters.Tags = expandTargetECSTaskTags(val.([]interface{})) + } + if val, ok := param["overrides"]; ok { + parameters.Overrides = expandTargetECSTaskOverrides(val.([]interface{})) + } + } + + return ¶meters +} + +func expandTargetCapacityProviderStrategy(config []interface{}) []types.CapacityProviderStrategyItem { + if len(config) == 0 { + return nil + } + + var parameters []types.CapacityProviderStrategyItem + for _, c := range config { + param := c.(map[string]interface{}) + + var provider types.CapacityProviderStrategyItem + provider.CapacityProvider = expandString("capacity_provider", param) + base := expandInt32("base", param) + if base != nil { + provider.Base = aws.ToInt32(base) + } + weight := expandInt32("weight", param) + if weight != nil { + provider.Weight = aws.ToInt32(weight) + } + + parameters = append(parameters, provider) + } + + return parameters +} + +func expandTargetNetworkConfiguration(config []interface{}) *types.NetworkConfiguration { + if len(config) == 0 { + return nil + } + + var parameters types.NetworkConfiguration + for _, c := range config { + param := c.(map[string]interface{}) + + if val, ok := param["aws_vpc_configuration"]; ok { + parameters.AwsvpcConfiguration = expandTargetAWSVPCConfiguration(val.([]interface{})) + } + } + + return ¶meters +} + +func expandTargetAWSVPCConfiguration(config []interface{}) *types.AwsVpcConfiguration { + 
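// security_groups and subnets are TypeSet values; they are only copied onto the SDK struct when non-empty. + 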
if len(config) == 0 { + return nil + } + + var parameters types.AwsVpcConfiguration + for _, c := range config { + param := c.(map[string]interface{}) + assignPublicIp := expandStringValue("assign_public_ip", param) + if assignPublicIp != "" { + parameters.AssignPublicIp = types.AssignPublicIp(assignPublicIp) + } + + if value, ok := param["security_groups"]; ok && value.(*schema.Set).Len() > 0 { + parameters.SecurityGroups = flex.ExpandStringValueSet(value.(*schema.Set)) + } + + if value, ok := param["subnets"]; ok && value.(*schema.Set).Len() > 0 { + parameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set)) + } + } + + return ¶meters +} + +func expandTargetPlacementConstraints(config []interface{}) []types.PlacementConstraint { + if len(config) == 0 { + return nil + } + + var parameters []types.PlacementConstraint + for _, c := range config { + param := c.(map[string]interface{}) + + var constraint types.PlacementConstraint + constraint.Expression = expandString("expression", param) + constraintType := expandStringValue("type", param) + if constraintType != "" { + constraint.Type = types.PlacementConstraintType(constraintType) + } + + parameters = append(parameters, constraint) + } + + return parameters +} + +func expandTargetPlacementStrategies(config []interface{}) []types.PlacementStrategy { + if len(config) == 0 { + return nil + } + + var parameters []types.PlacementStrategy + for _, c := range config { + param := c.(map[string]interface{}) + + var strategy types.PlacementStrategy + strategy.Field = expandString("field", param) + strategyType := expandStringValue("type", param) + if strategyType != "" { + strategy.Type = types.PlacementStrategyType(strategyType) + } + + parameters = append(parameters, strategy) + } + + return parameters +} + +func expandTargetECSTaskTags(config []interface{}) []types.Tag { + if len(config) == 0 { + return nil + } + + var parameters []types.Tag + for _, c := range config { + param := c.(map[string]interface{}) + + var tag types.Tag + tag.Key = expandString("key", param) + tag.Value = expandString("value", param) + + parameters = append(parameters, tag) + } + + return parameters +} + +func expandTargetECSTaskOverrides(config []interface{}) *types.EcsTaskOverride { + if len(config) == 0 { + return nil + } + + var parameters types.EcsTaskOverride + for _, c := range config { + param := c.(map[string]interface{}) + parameters.Cpu = expandString("cpu", param) + parameters.Memory = expandString("memory", param) + parameters.ExecutionRoleArn = expandString("execution_role_arn", param) + parameters.TaskRoleArn = expandString("task_role_arn", param) + + if val, ok := param["inference_accelerator_overrides"]; ok { + var inferenceAcceleratorOverrides []types.EcsInferenceAcceleratorOverride + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + var override types.EcsInferenceAcceleratorOverride + override.DeviceName = expandString("device_name", valueParam) + override.DeviceType = expandString("device_type", valueParam) + inferenceAcceleratorOverrides = append(inferenceAcceleratorOverrides, override) + } + } + if len(inferenceAcceleratorOverrides) > 0 { + parameters.InferenceAcceleratorOverrides = inferenceAcceleratorOverrides + } + } + + if val, ok := param["ecs_ephemeral_storage"]; ok { + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + if size, ok := valueParam["size_in_gib"].(int32); ok { + 
parameters.EphemeralStorage = &types.EcsEphemeralStorage{
+ SizeInGiB: size,
+ }
+ }
+ }
+ }
+ }
+
+ if val, ok := param["container_overrides"]; ok {
+ parameters.ContainerOverrides = expandTargetECSTaskOverrideContainerOverrides(val.([]interface{}))
+ }
+ }
+
+ return &parameters
+}
+
+func expandTargetECSTaskOverrideContainerOverrides(config []interface{}) []types.EcsContainerOverride {
+ if len(config) == 0 {
+ return nil
+ }
+
+ var parameters []types.EcsContainerOverride
+ for _, c := range config {
+ param := c.(map[string]interface{})
+
+ var override types.EcsContainerOverride
+ override.Cpu = expandInt32("cpu", param)
+ override.Memory = expandInt32("memory", param)
+ override.MemoryReservation = expandInt32("memory_reservation", param)
+ override.Name = expandString("name", param)
+ if value, ok := param["command"]; ok && value.(*schema.Set).Len() > 0 {
+ override.Command = flex.ExpandStringValueSet(value.(*schema.Set))
+ }
+
+ if val, ok := param["environment"]; ok {
+ var environment []types.EcsEnvironmentVariable
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var env types.EcsEnvironmentVariable
+ env.Name = expandString("name", valueParam)
+ env.Value = expandString("value", valueParam)
+ environment = append(environment, env)
+ }
+ }
+ if len(environment) > 0 {
+ override.Environment = environment
+ }
+ }
+
+ if val, ok := param["environment_files"]; ok {
+ var environment []types.EcsEnvironmentFile
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var env types.EcsEnvironmentFile
+ envType := expandStringValue("type", valueParam)
+ if envType != "" {
+ env.Type = types.EcsEnvironmentFileType(envType)
+ }
+ env.Value = expandString("value", valueParam)
+ environment = append(environment, env)
+ }
+ }
+ if len(environment) > 0 {
+ override.EnvironmentFiles = environment
+ }
+ }
+
+ if val, ok := param["resource_requirements"]; ok {
+ var resourceRequirements []types.EcsResourceRequirement
+ if values, ok := val.([]interface{}); ok {
+ for _, v := range values {
+ valueParam := v.(map[string]interface{})
+
+ var resourceRequirement types.EcsResourceRequirement
+ resourceRequirementType := expandStringValue("type", valueParam)
+ if resourceRequirementType != "" {
+ resourceRequirement.Type = types.EcsResourceRequirementType(resourceRequirementType)
+ }
+ resourceRequirement.Value = expandString("value", valueParam)
+ resourceRequirements = append(resourceRequirements, resourceRequirement)
+ }
+ }
+ if len(resourceRequirements) > 0 {
+ override.ResourceRequirements = resourceRequirements
+ }
+ }
+
+ parameters = append(parameters, override)
+ }
+
+ return parameters
+}
+
+func expandTargetEventBridgeEventBusParameters(config []interface{}) *types.PipeTargetEventBridgeEventBusParameters {
+ if len(config) == 0 {
+ return nil
+ }
+
+ var parameters types.PipeTargetEventBridgeEventBusParameters
+ for _, c := range config {
+ param := c.(map[string]interface{})
+ parameters.DetailType = expandString("detail_type", param)
+ parameters.EndpointId = expandString("endpoint_id", param)
+ parameters.Source = expandString("source", param)
+ parameters.Time = expandString("time", param)
+ if value, ok := param["resources"]; ok && value.(*schema.Set).Len() > 0 {
+ parameters.Resources = flex.ExpandStringValueSet(value.(*schema.Set))
+ }
+ }
+
+ return &parameters
+}
+
+func expandTargetHTTPParameters(config []interface{}) *types.PipeTargetHttpParameters {
+ if len(config) == 0 { + 
return nil + } + + var parameters types.PipeTargetHttpParameters + for _, c := range config { + param := c.(map[string]interface{}) + if val, ok := param["path_parameters"]; ok { + parameters.PathParameterValues = flex.ExpandStringValueList(val.([]interface{})) + } + + if val, ok := param["header"]; ok { + headers := map[string]string{} + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + if key, ok := valueParam["key"].(string); ok && key != "" { + if value, ok := valueParam["value"].(string); ok && value != "" { + headers[key] = value + } + } + } + } + if len(headers) > 0 { + parameters.HeaderParameters = headers + } + } + + if val, ok := param["query_string"]; ok { + queryStrings := map[string]string{} + if values, ok := val.([]interface{}); ok { + for _, v := range values { + valueParam := v.(map[string]interface{}) + + if key, ok := valueParam["key"].(string); ok && key != "" { + if value, ok := valueParam["value"].(string); ok && value != "" { + queryStrings[key] = value + } + } + } + } + if len(queryStrings) > 0 { + parameters.QueryStringParameters = queryStrings + } + } + } + return ¶meters +} + +func expandTargetKinesisStreamParameters(config []interface{}) *types.PipeTargetKinesisStreamParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetKinesisStreamParameters + for _, c := range config { + param := c.(map[string]interface{}) + parameters.PartitionKey = expandString("partition_key", param) + } + + return ¶meters +} + +func expandTargetLambdaFunctionParameters(config []interface{}) *types.PipeTargetLambdaFunctionParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetLambdaFunctionParameters + for _, c := range config { + param := c.(map[string]interface{}) + invocationType := expandStringValue("invocation_type", param) + if invocationType != "" { + parameters.InvocationType = types.PipeTargetInvocationType(invocationType) + } + } + + return ¶meters +} + +func expandTargetRedshiftDataParameters(config []interface{}) *types.PipeTargetRedshiftDataParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetRedshiftDataParameters + for _, c := range config { + param := c.(map[string]interface{}) + parameters.Database = expandString("database", param) + parameters.DbUser = expandString("database_user", param) + parameters.SecretManagerArn = expandString("secret_manager_arn", param) + parameters.StatementName = expandString("statement_name", param) + parameters.WithEvent = expandBool("with_event", param) + if value, ok := param["sqls"]; ok && value.(*schema.Set).Len() > 0 { + parameters.Sqls = flex.ExpandStringValueSet(value.(*schema.Set)) + } + } + + return ¶meters +} + +func expandTargetSageMakerPipelineParameters(config []interface{}) *types.PipeTargetSageMakerPipelineParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetSageMakerPipelineParameters + for _, c := range config { + param := c.(map[string]interface{}) + if val, ok := param["parameters"]; ok { + parametersConfig := val.([]interface{}) + var params []types.SageMakerPipelineParameter + for _, p := range parametersConfig { + pp := p.(map[string]interface{}) + name := expandString("name", pp) + value := expandString("value", pp) + if name != nil { + params = append(params, types.SageMakerPipelineParameter{ + Name: name, + Value: value, + }) + } + } + parameters.PipelineParameterList = params + } + } + + return ¶meters +} + 
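+// expandTargetSqsQueueParameters maps the sqs_queue block's optional FIFO settings (message_deduplication_id and message_group_id) onto the SDK type.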
+func expandTargetSqsQueueParameters(config []interface{}) *types.PipeTargetSqsQueueParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetSqsQueueParameters + for _, c := range config { + param := c.(map[string]interface{}) + parameters.MessageDeduplicationId = expandString("message_deduplication_id", param) + parameters.MessageGroupId = expandString("message_group_id", param) + } + + return ¶meters +} + +func expandTargetStepFunctionStateMachineParameters(config []interface{}) *types.PipeTargetStateMachineParameters { + if len(config) == 0 { + return nil + } + + var parameters types.PipeTargetStateMachineParameters + for _, c := range config { + param := c.(map[string]interface{}) + invocationType := expandStringValue("invocation_type", param) + if invocationType != "" { + parameters.InvocationType = types.PipeTargetInvocationType(invocationType) + } + } + + return ¶meters +} + +func flattenTargetParameters(targetParameters *types.PipeTargetParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if targetParameters.BatchJobParameters != nil { + config["batch_target"] = flattenTargetBatchJobParameters(targetParameters.BatchJobParameters) + } + + if targetParameters.CloudWatchLogsParameters != nil { + config["cloudwatch_logs"] = flattenTargetCloudWatchLogsParameters(targetParameters.CloudWatchLogsParameters) + } + + if targetParameters.EcsTaskParameters != nil { + config["ecs_task"] = flattenTargetEcsTaskParameters(targetParameters.EcsTaskParameters) + } + + if targetParameters.EventBridgeEventBusParameters != nil { + config["event_bridge_event_bus"] = flattenTargetEventBridgeEventBusParameters(targetParameters.EventBridgeEventBusParameters) + } + + if targetParameters.HttpParameters != nil { + config["http_parameters"] = flattenTargetHttpParameters(targetParameters.HttpParameters) + } + + if targetParameters.InputTemplate != nil { + config["input_template"] = aws.ToString(targetParameters.InputTemplate) + } + + if targetParameters.KinesisStreamParameters != nil { + config["kinesis_stream"] = flattenTargetKinesisStreamParameters(targetParameters.KinesisStreamParameters) + } + + if targetParameters.LambdaFunctionParameters != nil { + config["lambda_function"] = flattenTargetLambdaFunctionParameters(targetParameters.LambdaFunctionParameters) + } + + if targetParameters.RedshiftDataParameters != nil { + config["redshift_data"] = flattenTargetRedshiftDataParameters(targetParameters.RedshiftDataParameters) + } + + if targetParameters.SageMakerPipelineParameters != nil { + config["sage_maker_pipeline"] = flattenTargetSageMakerPipelineParameters(targetParameters.SageMakerPipelineParameters) + } + + if targetParameters.SqsQueueParameters != nil { + config["sqs_queue"] = flattenTargetSqsQueueParameters(targetParameters.SqsQueueParameters) + } + + if targetParameters.StepFunctionStateMachineParameters != nil { + config["step_function"] = flattenTargetStepFunctionStateMachineParameters(targetParameters.StepFunctionStateMachineParameters) + } + + if len(config) == 0 { + return nil + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetBatchJobParameters(parameters *types.PipeTargetBatchJobParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.JobDefinition != nil { + config["job_definition"] = aws.ToString(parameters.JobDefinition) + } + if parameters.JobName != nil { + config["job_name"] = aws.ToString(parameters.JobName) + } + + var parameterValues 
[]map[string]interface{} + for key, value := range parameters.Parameters { + p := make(map[string]interface{}) + p["key"] = key + p["value"] = value + parameterValues = append(parameterValues, p) + } + config["parameters"] = parameterValues + + if parameters.RetryStrategy != nil { + retryStrategyConfig := make(map[string]interface{}) + retryStrategyConfig["attempts"] = parameters.RetryStrategy.Attempts + config["retry_strategy"] = []map[string]interface{}{retryStrategyConfig} + } + + if parameters.ArrayProperties != nil { + arrayPropertiesConfig := make(map[string]interface{}) + arrayPropertiesConfig["size"] = parameters.ArrayProperties.Size + config["array_properties"] = []map[string]interface{}{arrayPropertiesConfig} + } + + var dependsOnValues []map[string]interface{} + for _, value := range parameters.DependsOn { + dependsOn := make(map[string]interface{}) + dependsOn["job_id"] = aws.ToString(value.JobId) + dependsOn["type"] = value.Type + dependsOnValues = append(dependsOnValues, dependsOn) + } + config["depends_on"] = dependsOnValues + + if parameters.ContainerOverrides != nil { + config["container_overrides"] = flattenTargetBatchContainerOverrides(parameters.ContainerOverrides) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetBatchContainerOverrides(parameters *types.BatchContainerOverrides) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.Command != nil { + config["command"] = flex.FlattenStringValueSet(parameters.Command) + } + if parameters.InstanceType != nil { + config["instance_type"] = aws.ToString(parameters.InstanceType) + } + + var environmentValues []map[string]interface{} + for _, value := range parameters.Environment { + env := make(map[string]interface{}) + env["name"] = aws.ToString(value.Name) + env["value"] = aws.ToString(value.Value) + environmentValues = append(environmentValues, env) + } + config["environment"] = environmentValues + + var resourceRequirementsValues []map[string]interface{} + for _, value := range parameters.ResourceRequirements { + rr := make(map[string]interface{}) + rr["type"] = value.Type + rr["value"] = aws.ToString(value.Value) + resourceRequirementsValues = append(resourceRequirementsValues, rr) + } + config["resource_requirements"] = resourceRequirementsValues + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetCloudWatchLogsParameters(parameters *types.PipeTargetCloudWatchLogsParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.LogStreamName != nil { + config["log_stream_name"] = aws.ToString(parameters.LogStreamName) + } + if parameters.Timestamp != nil { + config["timestamp"] = aws.ToString(parameters.Timestamp) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetEcsTaskParameters(parameters *types.PipeTargetEcsTaskParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.TaskDefinitionArn != nil { + config["task_definition_arn"] = aws.ToString(parameters.TaskDefinitionArn) + } + config["enable_ecs_managed_tags"] = parameters.EnableECSManagedTags + config["enable_execute_command"] = parameters.EnableExecuteCommand + if parameters.Group != nil { + config["group"] = aws.ToString(parameters.Group) + } + if parameters.LaunchType != "" { + config["launch_type"] = parameters.LaunchType + } + if parameters.PlatformVersion != nil { + config["platform_version"] = aws.ToString(parameters.PlatformVersion) + } + if 
parameters.PropagateTags != "" { + config["propagate_tags"] = parameters.PropagateTags + } + if parameters.ReferenceId != nil { + config["reference_id"] = aws.ToString(parameters.ReferenceId) + } + if parameters.TaskCount != nil { + config["task_count"] = aws.ToInt32(parameters.TaskCount) + } + + var capacityProviderStrategyValues []map[string]interface{} + for _, value := range parameters.CapacityProviderStrategy { + strategy := make(map[string]interface{}) + strategy["capacity_provider"] = aws.ToString(value.CapacityProvider) + strategy["base"] = value.Base + strategy["weight"] = value.Weight + capacityProviderStrategyValues = append(capacityProviderStrategyValues, strategy) + } + config["capacity_provider_strategy"] = capacityProviderStrategyValues + + var placementConstraintsValues []map[string]interface{} + for _, value := range parameters.PlacementConstraints { + constraint := make(map[string]interface{}) + constraint["expression"] = aws.ToString(value.Expression) + constraint["type"] = value.Type + placementConstraintsValues = append(placementConstraintsValues, constraint) + } + config["placement_constraints"] = placementConstraintsValues + + var placementStrategyValues []map[string]interface{} + for _, value := range parameters.PlacementStrategy { + strategy := make(map[string]interface{}) + strategy["field"] = aws.ToString(value.Field) + strategy["type"] = value.Type + placementStrategyValues = append(placementStrategyValues, strategy) + } + config["placement_strategy"] = placementStrategyValues + + var tagValues []map[string]interface{} + for _, tag := range parameters.Tags { + t := make(map[string]interface{}) + t["key"] = aws.ToString(tag.Key) + t["value"] = aws.ToString(tag.Value) + tagValues = append(tagValues, t) + } + config["tags"] = tagValues + + if parameters.NetworkConfiguration != nil { + config["network_configuration"] = flattenTargetNetworkConfiguration(parameters.NetworkConfiguration) + } + + if parameters.Overrides != nil { + config["overrides"] = flattenTargetECSTaskOverrides(parameters.Overrides) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetNetworkConfiguration(parameters *types.NetworkConfiguration) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.AwsvpcConfiguration != nil { + awsVpcConfiguration := make(map[string]interface{}) + awsVpcConfiguration["assign_public_ip"] = parameters.AwsvpcConfiguration.AssignPublicIp + + if parameters.AwsvpcConfiguration.SecurityGroups != nil { + awsVpcConfiguration["security_groups"] = flex.FlattenStringValueSet(parameters.AwsvpcConfiguration.SecurityGroups) + } + + if parameters.AwsvpcConfiguration.Subnets != nil { + awsVpcConfiguration["subnets"] = flex.FlattenStringValueSet(parameters.AwsvpcConfiguration.Subnets) + } + + config["aws_vpc_configuration"] = []map[string]interface{}{awsVpcConfiguration} + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetECSTaskOverrides(parameters *types.EcsTaskOverride) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.Cpu != nil { + config["cpu"] = aws.ToString(parameters.Cpu) + } + if parameters.Memory != nil { + config["memory"] = aws.ToString(parameters.Memory) + } + if parameters.ExecutionRoleArn != nil { + config["execution_role_arn"] = aws.ToString(parameters.ExecutionRoleArn) + } + if parameters.TaskRoleArn != nil { + config["task_role_arn"] = aws.ToString(parameters.TaskRoleArn) + } + + if parameters.EphemeralStorage != nil { + 
ecsEphemeralStorageConfig := make(map[string]interface{}) + ecsEphemeralStorageConfig["size_in_gib"] = parameters.EphemeralStorage.SizeInGiB + config["ecs_ephemeral_storage"] = []map[string]interface{}{ecsEphemeralStorageConfig} + } + + var inferenceAcceleratorOverridesValues []map[string]interface{} + for _, value := range parameters.InferenceAcceleratorOverrides { + override := make(map[string]interface{}) + override["device_name"] = aws.ToString(value.DeviceName) + override["device_type"] = aws.ToString(value.DeviceType) + inferenceAcceleratorOverridesValues = append(inferenceAcceleratorOverridesValues, override) + } + config["inference_accelerator_overrides"] = inferenceAcceleratorOverridesValues + + var overridesValues []map[string]interface{} + for _, value := range parameters.ContainerOverrides { + override := flattenTargetECSTaskOverrideContainerOverride(value) + overridesValues = append(overridesValues, override) + } + config["container_overrides"] = overridesValues + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetECSTaskOverrideContainerOverride(parameters types.EcsContainerOverride) map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.Cpu != nil { + config["cpu"] = aws.ToInt32(parameters.Cpu) + } + if parameters.Memory != nil { + config["memory"] = aws.ToInt32(parameters.Memory) + } + if parameters.MemoryReservation != nil { + config["memory_reservation"] = aws.ToInt32(parameters.MemoryReservation) + } + if parameters.Name != nil { + config["name"] = aws.ToString(parameters.Name) + } + if parameters.Command != nil { + config["command"] = flex.FlattenStringValueSet(parameters.Command) + } + + var environmentValues []map[string]interface{} + for _, value := range parameters.Environment { + env := make(map[string]interface{}) + env["name"] = aws.ToString(value.Name) + env["value"] = aws.ToString(value.Value) + environmentValues = append(environmentValues, env) + } + config["environment"] = environmentValues + + var environmentFileValues []map[string]interface{} + for _, value := range parameters.EnvironmentFiles { + env := make(map[string]interface{}) + env["type"] = value.Type + env["value"] = aws.ToString(value.Value) + environmentFileValues = append(environmentFileValues, env) + } + config["environment_files"] = environmentFileValues + + var resourceRequirementsValues []map[string]interface{} + for _, value := range parameters.ResourceRequirements { + rr := make(map[string]interface{}) + rr["type"] = value.Type + rr["value"] = aws.ToString(value.Value) + resourceRequirementsValues = append(resourceRequirementsValues, rr) + } + config["resource_requirements"] = resourceRequirementsValues + + return config +} + +func flattenTargetEventBridgeEventBusParameters(parameters *types.PipeTargetEventBridgeEventBusParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.DetailType != nil { + config["detail_type"] = aws.ToString(parameters.DetailType) + } + if parameters.EndpointId != nil { + config["endpoint_id"] = aws.ToString(parameters.EndpointId) + } + if parameters.Source != nil { + config["source"] = aws.ToString(parameters.Source) + } + if parameters.Resources != nil { + config["resources"] = flex.FlattenStringValueSet(parameters.Resources) + } + if parameters.Time != nil { + config["time"] = aws.ToString(parameters.Time) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetHttpParameters(parameters *types.PipeTargetHttpParameters) 
[]map[string]interface{} { + config := make(map[string]interface{}) + + var headerParameters []map[string]interface{} + for key, value := range parameters.HeaderParameters { + header := make(map[string]interface{}) + header["key"] = key + header["value"] = value + headerParameters = append(headerParameters, header) + } + config["header"] = headerParameters + + var queryStringParameters []map[string]interface{} + for key, value := range parameters.QueryStringParameters { + queryString := make(map[string]interface{}) + queryString["key"] = key + queryString["value"] = value + queryStringParameters = append(queryStringParameters, queryString) + } + config["query_string"] = queryStringParameters + config["path_parameters"] = flex.FlattenStringValueList(parameters.PathParameterValues) + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetKinesisStreamParameters(parameters *types.PipeTargetKinesisStreamParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.PartitionKey != nil { + config["partition_key"] = aws.ToString(parameters.PartitionKey) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetLambdaFunctionParameters(parameters *types.PipeTargetLambdaFunctionParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.InvocationType != "" { + config["invocation_type"] = parameters.InvocationType + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetRedshiftDataParameters(parameters *types.PipeTargetRedshiftDataParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.Database != nil { + config["database"] = aws.ToString(parameters.Database) + } + if parameters.DbUser != nil { + config["database_user"] = aws.ToString(parameters.DbUser) + } + if parameters.SecretManagerArn != nil { + config["secret_manager_arn"] = aws.ToString(parameters.SecretManagerArn) + } + if parameters.StatementName != nil { + config["statement_name"] = aws.ToString(parameters.StatementName) + } + config["with_event"] = parameters.WithEvent + if parameters.Sqls != nil { + config["sqls"] = flex.FlattenStringValueSet(parameters.Sqls) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetSageMakerPipelineParameters(parameters *types.PipeTargetSageMakerPipelineParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if len(parameters.PipelineParameterList) != 0 { + var params []map[string]interface{} + for _, param := range parameters.PipelineParameterList { + item := make(map[string]interface{}) + item["name"] = aws.ToString(param.Name) + item["value"] = aws.ToString(param.Value) + params = append(params, item) + } + config["parameters"] = params + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetSqsQueueParameters(parameters *types.PipeTargetSqsQueueParameters) []map[string]interface{} { + config := make(map[string]interface{}) + + if parameters.MessageDeduplicationId != nil { + config["message_deduplication_id"] = aws.ToString(parameters.MessageDeduplicationId) + } + if parameters.MessageGroupId != nil { + config["message_group_id"] = aws.ToString(parameters.MessageGroupId) + } + + result := []map[string]interface{}{config} + return result +} + +func flattenTargetStepFunctionStateMachineParameters(parameters *types.PipeTargetStateMachineParameters) []map[string]interface{} { + config := 
make(map[string]interface{}) + + if parameters.InvocationType != "" { + config["invocation_type"] = parameters.InvocationType + } + + result := []map[string]interface{}{config} + return result +} diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go new file mode 100644 index 00000000000..869c0ab20bf --- /dev/null +++ b/internal/service/pipes/target_parameters_test.go @@ -0,0 +1,1110 @@ +package pipes + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" +) + +func Test_expandTargetParameters(t *testing.T) { + tests := map[string]struct { + config map[string]interface{} + expected *types.PipeTargetParameters + }{ + "batch_target config": { + config: map[string]interface{}{ + "batch_target": []interface{}{ + map[string]interface{}{ + "job_definition": "job:test", + "job_name": "test", + "retry_strategy": []interface{}{ + map[string]interface{}{ + "attempts": int32(2), + }, + }, + "array_properties": []interface{}{ + map[string]interface{}{ + "size": int32(50), + }, + }, + "parameters": []interface{}{ + map[string]interface{}{ + "key": "key1", + "value": "value1", + }, + map[string]interface{}{ + "key": "key2", + "value": "value2", + }, + }, + "depends_on": []interface{}{ + map[string]interface{}{ + "job_id": "jobID1", + "type": "N_TO_N", + }, + map[string]interface{}{ + "job_id": "jobID2", + "type": "SEQUENTIAL", + }, + }, + "container_overrides": []interface{}{ + map[string]interface{}{ + "command": schema.NewSet(schema.HashString, []interface{}{ + "command1", + "command2", + }), + "environment": []interface{}{ + map[string]interface{}{ + "name": "env1", + "value": "valueEnv1", + }, + map[string]interface{}{ + "name": "env2", + "value": "valueEnv2", + }, + }, + "instance_type": "instanceType", + "resource_requirements": []interface{}{ + map[string]interface{}{ + "type": "VCPU", + "value": "4", + }, + }, + }, + }, + }, + }, + }, + expected: &types.PipeTargetParameters{ + BatchJobParameters: &types.PipeTargetBatchJobParameters{ + JobDefinition: aws.String("job:test"), + JobName: aws.String("test"), + RetryStrategy: &types.BatchRetryStrategy{ + Attempts: 2, + }, + ArrayProperties: &types.BatchArrayProperties{ + Size: 50, + }, + Parameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + DependsOn: []types.BatchJobDependency{ + { + JobId: aws.String("jobID1"), + Type: types.BatchJobDependencyTypeNToN, + }, + { + JobId: aws.String("jobID2"), + Type: types.BatchJobDependencyTypeSequential, + }, + }, + ContainerOverrides: &types.BatchContainerOverrides{ + Command: []string{"command2", "command1"}, + Environment: []types.BatchEnvironmentVariable{ + { + Name: aws.String("env1"), + Value: aws.String("valueEnv1"), + }, + { + Name: aws.String("env2"), + Value: aws.String("valueEnv2"), + }, + }, + InstanceType: aws.String("instanceType"), + ResourceRequirements: []types.BatchResourceRequirement{ + { + Type: types.BatchResourceRequirementTypeVcpu, + Value: aws.String("4"), + }, + }, + }, + }, + }, + }, + "cloudwatch_logs config": { + config: map[string]interface{}{ + "cloudwatch_logs": []interface{}{ + map[string]interface{}{ + "log_stream_name": "job:test", + "timestamp": "2020-01-01T00:00:00Z", + }, + }, + }, + expected: &types.PipeTargetParameters{ + CloudWatchLogsParameters: &types.PipeTargetCloudWatchLogsParameters{ + LogStreamName: aws.String("job:test"), + 
Timestamp: aws.String("2020-01-01T00:00:00Z"), + }, + }, + }, + "ecs_task config": { + config: map[string]interface{}{ + "ecs_task": []interface{}{ + map[string]interface{}{ + "task_definition_arn": "arn:test", + "capacity_provider_strategy": []interface{}{ + map[string]interface{}{ + "capacity_provider": "capacityProvider", + "weight": int32(1), + "base": int32(10), + }, + }, + "enable_ecs_managed_tags": true, + "enable_execute_command": true, + "group": "group", + "launch_type": "FARGATE", + "network_configuration": []interface{}{ + map[string]interface{}{ + "aws_vpc_configuration": []interface{}{ + map[string]interface{}{ + "assign_public_ip": "ENABLED", + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + }, + }, + "placement_constraints": []interface{}{ + map[string]interface{}{ + "type": "memberOf", + "expression": "expression", + }, + }, + "placement_strategy": []interface{}{ + map[string]interface{}{ + "type": "binpack", + "field": "field", + }, + }, + "platform_version": "platformVersion", + "propagate_tags": "TASK_DEFINITION", + "reference_id": "referenceID", + "task_count": int32(1), + "tags": []interface{}{ + map[string]interface{}{ + "key": "key1", + "value": "value1", + }, + }, + "overrides": []interface{}{ + map[string]interface{}{ + "cpu": "cpu1", + "memory": "mem2", + "execution_role_arn": "arn:role", + "task_role_arn": "arn:role2", + "inference_accelerator_overrides": []interface{}{ + map[string]interface{}{ + "device_name": "deviceName", + "device_type": "deviceType", + }, + }, + "ecs_ephemeral_storage": []interface{}{ + map[string]interface{}{ + "size_in_gib": int32(30), + }, + }, + "container_overrides": []interface{}{ + map[string]interface{}{ + "cpu": int32(5), + "memory": int32(6), + "memory_reservation": int32(7), + "name": "name", + "command": schema.NewSet(schema.HashString, []interface{}{ + "command1", + "command2", + }), + "environment": []interface{}{ + map[string]interface{}{ + "name": "env1", + "value": "valueEnv1", + }, + }, + "environment_files": []interface{}{ + map[string]interface{}{ + "value": "some:arnvalue", + "type": "s3", + }, + }, + "resource_requirements": []interface{}{ + map[string]interface{}{ + "type": "GPU", + "value": "4", + }, + }, + }, + }, + }, + }, + }, + }, + }, + expected: &types.PipeTargetParameters{ + EcsTaskParameters: &types.PipeTargetEcsTaskParameters{ + TaskDefinitionArn: aws.String("arn:test"), + CapacityProviderStrategy: []types.CapacityProviderStrategyItem{ + { + CapacityProvider: aws.String("capacityProvider"), + Weight: 1, + Base: 10, + }, + }, + EnableECSManagedTags: true, + EnableExecuteCommand: true, + Group: aws.String("group"), + LaunchType: types.LaunchTypeFargate, + NetworkConfiguration: &types.NetworkConfiguration{ + AwsvpcConfiguration: &types.AwsVpcConfiguration{ + AssignPublicIp: types.AssignPublicIpEnabled, + SecurityGroups: []string{ + "sg2", + "sg1", + }, + Subnets: []string{ + "subnet1", + "subnet2", + }, + }, + }, + PlacementConstraints: []types.PlacementConstraint{ + { + Type: types.PlacementConstraintTypeMemberOf, + Expression: aws.String("expression"), + }, + }, + PlacementStrategy: []types.PlacementStrategy{ + { + Type: types.PlacementStrategyTypeBinpack, + Field: aws.String("field"), + }, + }, + PlatformVersion: aws.String("platformVersion"), + PropagateTags: types.PropagateTagsTaskDefinition, + ReferenceId: aws.String("referenceID"), + TaskCount: aws.Int32(1), 
+ Tags: []types.Tag{ + { + Key: aws.String("key1"), + Value: aws.String("value1"), + }, + }, + Overrides: &types.EcsTaskOverride{ + Cpu: aws.String("cpu1"), + Memory: aws.String("mem2"), + ExecutionRoleArn: aws.String("arn:role"), + TaskRoleArn: aws.String("arn:role2"), + InferenceAcceleratorOverrides: []types.EcsInferenceAcceleratorOverride{ + { + DeviceName: aws.String("deviceName"), + DeviceType: aws.String("deviceType"), + }, + }, + EphemeralStorage: &types.EcsEphemeralStorage{ + SizeInGiB: 30, + }, + ContainerOverrides: []types.EcsContainerOverride{ + { + Cpu: aws.Int32(5), + Memory: aws.Int32(6), + MemoryReservation: aws.Int32(7), + Name: aws.String("name"), + Command: []string{"command2", "command1"}, + Environment: []types.EcsEnvironmentVariable{ + { + Name: aws.String("env1"), + Value: aws.String("valueEnv1"), + }, + }, + EnvironmentFiles: []types.EcsEnvironmentFile{ + { + Value: aws.String("some:arnvalue"), + Type: types.EcsEnvironmentFileTypeS3, + }, + }, + ResourceRequirements: []types.EcsResourceRequirement{ + { + Type: types.EcsResourceRequirementTypeGpu, + Value: aws.String("4"), + }, + }, + }, + }, + }, + }, + }, + }, + "event_bridge_event_bus config": { + config: map[string]interface{}{ + "event_bridge_event_bus": []interface{}{ + map[string]interface{}{ + "detail_type": "some.event", + "endpoint_id": "endpointID", + "source": "source", + "time": "2020-01-01T00:00:00Z", + "resources": schema.NewSet(schema.HashString, []interface{}{ + "id1", + "id2", + }), + }, + }, + }, + expected: &types.PipeTargetParameters{ + EventBridgeEventBusParameters: &types.PipeTargetEventBridgeEventBusParameters{ + DetailType: aws.String("some.event"), + EndpointId: aws.String("endpointID"), + Source: aws.String("source"), + Time: aws.String("2020-01-01T00:00:00Z"), + Resources: []string{ + "id2", + "id1", + }, + }, + }, + }, + "http_parameters config": { + config: map[string]interface{}{ + "http_parameters": []interface{}{ + map[string]interface{}{ + "path_parameters": []interface{}{"a", "b"}, + "header": []interface{}{ + map[string]interface{}{ + "key": "key1", + "value": "value1", + }, + map[string]interface{}{ + "key": "key2", + "value": "value2", + }, + }, + "query_string": []interface{}{ + map[string]interface{}{ + "key": "key3", + "value": "value3", + }, + map[string]interface{}{ + "key": "key4", + "value": "value4", + }, + }, + }, + }, + }, + expected: &types.PipeTargetParameters{ + HttpParameters: &types.PipeTargetHttpParameters{ + PathParameterValues: []string{"a", "b"}, + HeaderParameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + QueryStringParameters: map[string]string{ + "key3": "value3", + "key4": "value4", + }, + }, + }, + }, + "kinesis_stream config": { + config: map[string]interface{}{ + "kinesis_stream": []interface{}{ + map[string]interface{}{ + "partition_key": "partitionKey", + }, + }, + }, + expected: &types.PipeTargetParameters{ + KinesisStreamParameters: &types.PipeTargetKinesisStreamParameters{ + PartitionKey: aws.String("partitionKey"), + }, + }, + }, + "lambda_function config": { + config: map[string]interface{}{ + "lambda_function": []interface{}{ + map[string]interface{}{ + "invocation_type": "FIRE_AND_FORGET", + }, + }, + }, + expected: &types.PipeTargetParameters{ + LambdaFunctionParameters: &types.PipeTargetLambdaFunctionParameters{ + InvocationType: types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + "redshift_data config": { + config: map[string]interface{}{ + "redshift_data": []interface{}{ + map[string]interface{}{ + 
"database": "database", + "database_user": "database_user", + "secret_manager_arn": "arn:secrets", + "statement_name": "statement_name", + "with_event": true, + "sqls": schema.NewSet(schema.HashString, []interface{}{ + "sql2", + "sql1", + }), + }, + }, + }, + expected: &types.PipeTargetParameters{ + RedshiftDataParameters: &types.PipeTargetRedshiftDataParameters{ + Database: aws.String("database"), + DbUser: aws.String("database_user"), + SecretManagerArn: aws.String("arn:secrets"), + StatementName: aws.String("statement_name"), + WithEvent: true, + Sqls: []string{"sql2", "sql1"}, + }, + }, + }, + "sage_maker_pipeline config": { + config: map[string]interface{}{ + "sage_maker_pipeline": []interface{}{ + map[string]interface{}{ + "parameters": []interface{}{ + map[string]interface{}{ + "name": "name1", + "value": "value1", + }, + map[string]interface{}{ + "name": "name2", + "value": "value2", + }, + }, + }, + }, + }, + expected: &types.PipeTargetParameters{ + SageMakerPipelineParameters: &types.PipeTargetSageMakerPipelineParameters{ + PipelineParameterList: []types.SageMakerPipelineParameter{ + { + Name: aws.String("name1"), + Value: aws.String("value1"), + }, + { + Name: aws.String("name2"), + Value: aws.String("value2"), + }, + }, + }, + }, + }, + "sqs_queue config": { + config: map[string]interface{}{ + "sqs_queue": []interface{}{ + map[string]interface{}{ + "message_deduplication_id": "deduplication-id", + "message_group_id": "group-id", + }, + }, + }, + expected: &types.PipeTargetParameters{ + SqsQueueParameters: &types.PipeTargetSqsQueueParameters{ + MessageDeduplicationId: aws.String("deduplication-id"), + MessageGroupId: aws.String("group-id"), + }, + }, + }, + "step_function config": { + config: map[string]interface{}{ + "step_function": []interface{}{ + map[string]interface{}{ + "invocation_type": "FIRE_AND_FORGET", + }, + }, + }, + expected: &types.PipeTargetParameters{ + StepFunctionStateMachineParameters: &types.PipeTargetStateMachineParameters{ + InvocationType: types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + "input_template config": { + config: map[string]interface{}{ + "input_template": "some template", + }, + expected: &types.PipeTargetParameters{ + InputTemplate: aws.String("some template"), + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := expandTargetParameters([]interface{}{tt.config}) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func Test_flattenTargetParameters(t *testing.T) { + tests := map[string]struct { + expected []map[string]interface{} + config *types.PipeTargetParameters + }{ + "batch_target config": { + expected: []map[string]interface{}{ + { + "batch_target": []map[string]interface{}{ + { + "job_definition": "job:test", + "job_name": "test", + "retry_strategy": []map[string]interface{}{ + { + "attempts": int32(2), + }, + }, + "array_properties": []map[string]interface{}{ + { + "size": int32(50), + }, + }, + "parameters": []map[string]interface{}{ + { + "key": "key1", + "value": "value1", + }, + { + "key": "key2", + "value": "value2", + }, + }, + "depends_on": []map[string]interface{}{ + { + "job_id": "jobID1", + "type": types.BatchJobDependencyTypeNToN, + }, + { + "job_id": "jobID2", + "type": types.BatchJobDependencyTypeSequential, + }, + }, + "container_overrides": []map[string]interface{}{ + { + "command": schema.NewSet(schema.HashString, []interface{}{ + "command1", + "command2", + }), + "environment": []map[string]interface{}{ + { + "name": "env1", + "value": "valueEnv1", + }, + { + "name": 
"env2", + "value": "valueEnv2", + }, + }, + "instance_type": "instanceType", + "resource_requirements": []map[string]interface{}{ + { + "type": types.BatchResourceRequirementTypeVcpu, + "value": "4", + }, + }, + }, + }, + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + BatchJobParameters: &types.PipeTargetBatchJobParameters{ + JobDefinition: aws.String("job:test"), + JobName: aws.String("test"), + RetryStrategy: &types.BatchRetryStrategy{ + Attempts: 2, + }, + ArrayProperties: &types.BatchArrayProperties{ + Size: 50, + }, + Parameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + DependsOn: []types.BatchJobDependency{ + { + JobId: aws.String("jobID1"), + Type: types.BatchJobDependencyTypeNToN, + }, + { + JobId: aws.String("jobID2"), + Type: types.BatchJobDependencyTypeSequential, + }, + }, + ContainerOverrides: &types.BatchContainerOverrides{ + Command: []string{"command2", "command1"}, + Environment: []types.BatchEnvironmentVariable{ + { + Name: aws.String("env1"), + Value: aws.String("valueEnv1"), + }, + { + Name: aws.String("env2"), + Value: aws.String("valueEnv2"), + }, + }, + InstanceType: aws.String("instanceType"), + ResourceRequirements: []types.BatchResourceRequirement{ + { + Type: types.BatchResourceRequirementTypeVcpu, + Value: aws.String("4"), + }, + }, + }, + }, + }, + }, + "cloudwatch_logs config": { + expected: []map[string]interface{}{ + { + "cloudwatch_logs": []map[string]interface{}{ + { + "log_stream_name": "job:test", + "timestamp": "2020-01-01T00:00:00Z", + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + CloudWatchLogsParameters: &types.PipeTargetCloudWatchLogsParameters{ + LogStreamName: aws.String("job:test"), + Timestamp: aws.String("2020-01-01T00:00:00Z"), + }, + }, + }, + "ecs_task config": { + expected: []map[string]interface{}{ + { + "ecs_task": []map[string]interface{}{ + { + "task_definition_arn": "arn:test", + "capacity_provider_strategy": []map[string]interface{}{ + { + "capacity_provider": "capacityProvider", + "weight": int32(1), + "base": int32(10), + }, + }, + "enable_ecs_managed_tags": true, + "enable_execute_command": true, + "group": "group", + "launch_type": types.LaunchTypeFargate, + "network_configuration": []map[string]interface{}{ + { + "aws_vpc_configuration": []map[string]interface{}{ + { + "assign_public_ip": types.AssignPublicIpEnabled, + "security_groups": schema.NewSet(schema.HashString, []interface{}{ + "sg1", + "sg2", + }), + "subnets": schema.NewSet(schema.HashString, []interface{}{ + "subnet1", + "subnet2", + }), + }, + }, + }, + }, + "placement_constraints": []map[string]interface{}{ + { + "type": types.PlacementConstraintTypeMemberOf, + "expression": "expression", + }, + }, + "placement_strategy": []map[string]interface{}{ + { + "type": types.PlacementStrategyTypeBinpack, + "field": "field", + }, + }, + "platform_version": "platformVersion", + "propagate_tags": types.PropagateTagsTaskDefinition, + "reference_id": "referenceID", + "task_count": int32(1), + "tags": []map[string]interface{}{ + { + "key": "key1", + "value": "value1", + }, + }, + "overrides": []map[string]interface{}{ + { + "cpu": "cpu1", + "memory": "mem2", + "execution_role_arn": "arn:role", + "task_role_arn": "arn:role2", + "inference_accelerator_overrides": []map[string]interface{}{ + { + "device_name": "deviceName", + "device_type": "deviceType", + }, + }, + "ecs_ephemeral_storage": []map[string]interface{}{ + { + "size_in_gib": int32(30), + }, + }, + "container_overrides": []map[string]interface{}{ + { + "cpu": int32(5), + 
"memory": int32(6), + "memory_reservation": int32(7), + "name": "name", + "command": schema.NewSet(schema.HashString, []interface{}{ + "command1", + "command2", + }), + "environment": []map[string]interface{}{ + { + "name": "env1", + "value": "valueEnv1", + }, + }, + "environment_files": []map[string]interface{}{ + { + "value": "some:arnvalue", + "type": types.EcsEnvironmentFileTypeS3, + }, + }, + "resource_requirements": []map[string]interface{}{ + { + "type": types.EcsResourceRequirementTypeGpu, + "value": "4", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + EcsTaskParameters: &types.PipeTargetEcsTaskParameters{ + TaskDefinitionArn: aws.String("arn:test"), + CapacityProviderStrategy: []types.CapacityProviderStrategyItem{ + { + CapacityProvider: aws.String("capacityProvider"), + Weight: 1, + Base: 10, + }, + }, + EnableECSManagedTags: true, + EnableExecuteCommand: true, + Group: aws.String("group"), + LaunchType: types.LaunchTypeFargate, + NetworkConfiguration: &types.NetworkConfiguration{ + AwsvpcConfiguration: &types.AwsVpcConfiguration{ + AssignPublicIp: types.AssignPublicIpEnabled, + SecurityGroups: []string{ + "sg2", + "sg1", + }, + Subnets: []string{ + "subnet1", + "subnet2", + }, + }, + }, + PlacementConstraints: []types.PlacementConstraint{ + { + Type: types.PlacementConstraintTypeMemberOf, + Expression: aws.String("expression"), + }, + }, + PlacementStrategy: []types.PlacementStrategy{ + { + Type: types.PlacementStrategyTypeBinpack, + Field: aws.String("field"), + }, + }, + PlatformVersion: aws.String("platformVersion"), + PropagateTags: types.PropagateTagsTaskDefinition, + ReferenceId: aws.String("referenceID"), + TaskCount: aws.Int32(1), + Tags: []types.Tag{ + { + Key: aws.String("key1"), + Value: aws.String("value1"), + }, + }, + Overrides: &types.EcsTaskOverride{ + Cpu: aws.String("cpu1"), + Memory: aws.String("mem2"), + ExecutionRoleArn: aws.String("arn:role"), + TaskRoleArn: aws.String("arn:role2"), + InferenceAcceleratorOverrides: []types.EcsInferenceAcceleratorOverride{ + { + DeviceName: aws.String("deviceName"), + DeviceType: aws.String("deviceType"), + }, + }, + EphemeralStorage: &types.EcsEphemeralStorage{ + SizeInGiB: 30, + }, + ContainerOverrides: []types.EcsContainerOverride{ + { + Cpu: aws.Int32(5), + Memory: aws.Int32(6), + MemoryReservation: aws.Int32(7), + Name: aws.String("name"), + Command: []string{"command2", "command1"}, + Environment: []types.EcsEnvironmentVariable{ + { + Name: aws.String("env1"), + Value: aws.String("valueEnv1"), + }, + }, + EnvironmentFiles: []types.EcsEnvironmentFile{ + { + Value: aws.String("some:arnvalue"), + Type: types.EcsEnvironmentFileTypeS3, + }, + }, + ResourceRequirements: []types.EcsResourceRequirement{ + { + Type: types.EcsResourceRequirementTypeGpu, + Value: aws.String("4"), + }, + }, + }, + }, + }, + }, + }, + }, + "event_bridge_event_bus config": { + expected: []map[string]interface{}{ + { + "event_bridge_event_bus": []map[string]interface{}{ + { + "detail_type": "some.event", + "endpoint_id": "endpointID", + "source": "source", + "time": "2020-01-01T00:00:00Z", + "resources": schema.NewSet(schema.HashString, []interface{}{ + "id1", + "id2", + }), + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + EventBridgeEventBusParameters: &types.PipeTargetEventBridgeEventBusParameters{ + DetailType: aws.String("some.event"), + EndpointId: aws.String("endpointID"), + Source: aws.String("source"), + Time: aws.String("2020-01-01T00:00:00Z"), + Resources: []string{ + "id2", + "id1", + 
}, + }, + }, + }, + "http_parameters config": { + expected: []map[string]interface{}{ + { + "http_parameters": []map[string]interface{}{ + { + "path_parameters": []interface{}{"a", "b"}, + "header": []map[string]interface{}{ + { + "key": "key1", + "value": "value1", + }, + { + "key": "key2", + "value": "value2", + }, + }, + "query_string": []map[string]interface{}{ + { + "key": "key3", + "value": "value3", + }, + { + "key": "key4", + "value": "value4", + }, + }, + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + HttpParameters: &types.PipeTargetHttpParameters{ + PathParameterValues: []string{"a", "b"}, + HeaderParameters: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + QueryStringParameters: map[string]string{ + "key3": "value3", + "key4": "value4", + }, + }, + }, + }, + "kinesis_stream config": { + expected: []map[string]interface{}{ + { + "kinesis_stream": []map[string]interface{}{ + { + "partition_key": "partitionKey", + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + KinesisStreamParameters: &types.PipeTargetKinesisStreamParameters{ + PartitionKey: aws.String("partitionKey"), + }, + }, + }, + "lambda_function config": { + expected: []map[string]interface{}{ + { + "lambda_function": []map[string]interface{}{ + { + "invocation_type": types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + LambdaFunctionParameters: &types.PipeTargetLambdaFunctionParameters{ + InvocationType: types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + "redshift_data config": { + expected: []map[string]interface{}{ + { + "redshift_data": []map[string]interface{}{ + { + "database": "database", + "database_user": "database_user", + "secret_manager_arn": "arn:secrets", + "statement_name": "statement_name", + "with_event": true, + "sqls": schema.NewSet(schema.HashString, []interface{}{ + "sql2", + "sql1", + }), + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + RedshiftDataParameters: &types.PipeTargetRedshiftDataParameters{ + Database: aws.String("database"), + DbUser: aws.String("database_user"), + SecretManagerArn: aws.String("arn:secrets"), + StatementName: aws.String("statement_name"), + WithEvent: true, + Sqls: []string{"sql2", "sql1"}, + }, + }, + }, + "sage_maker_pipeline config": { + expected: []map[string]interface{}{ + { + "sage_maker_pipeline": []map[string]interface{}{ + { + "parameters": []map[string]interface{}{ + { + "name": "name1", + "value": "value1", + }, + { + "name": "name2", + "value": "value2", + }, + }, + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + SageMakerPipelineParameters: &types.PipeTargetSageMakerPipelineParameters{ + PipelineParameterList: []types.SageMakerPipelineParameter{ + { + Name: aws.String("name1"), + Value: aws.String("value1"), + }, + { + Name: aws.String("name2"), + Value: aws.String("value2"), + }, + }, + }, + }, + }, + "sqs_queue config": { + expected: []map[string]interface{}{ + { + "sqs_queue": []map[string]interface{}{ + { + "message_deduplication_id": "deduplication-id", + "message_group_id": "group-id", + }, + }, + }, + }, + config: &types.PipeTargetParameters{ + SqsQueueParameters: &types.PipeTargetSqsQueueParameters{ + MessageDeduplicationId: aws.String("deduplication-id"), + MessageGroupId: aws.String("group-id"), + }, + }, + }, + "step_function config": { + expected: []map[string]interface{}{ + { + "step_function": []map[string]interface{}{ + { + "invocation_type": types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + }, + config: 
&types.PipeTargetParameters{ + StepFunctionStateMachineParameters: &types.PipeTargetStateMachineParameters{ + InvocationType: types.PipeTargetInvocationTypeFireAndForget, + }, + }, + }, + "input_template config": { + expected: []map[string]interface{}{ + { + "input_template": "some template", + }, + }, + config: &types.PipeTargetParameters{ + InputTemplate: aws.String("some template"), + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + got := flattenTargetParameters(tt.config) + + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/internal/service/pipes/test-fixtures/lambdatest.zip b/internal/service/pipes/test-fixtures/lambdatest.zip new file mode 100644 index 0000000000000000000000000000000000000000..5c636e955b2cccd992ac213993798acfdc39d6aa GIT binary patch literal 342 zcmWIWW@Zs#U|`^2_)xpcjj3pP&N2{>k%57iL53kGF*hkCu_U#)L@%p2G=!6ZndL`H zXdn=mR&X;gvU~-q18Xlm&k&i8#+QN6XCT<*-b`*@ggZ%;WfQT_nWzO%+$*O&s=9G|AR z&f>yR^AA6>@7sK}irLaRiOHq^)$z9dpQ3v8avCCzNC$W`GRZOH@~#BX;|vTyA2BRx d1hLRO&kFH8n#TjYS=m5}8G$euNWTSf7yyUqbSVG; literal 0 HcmV?d00001 diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 96426187c73..d240c6f7736 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -12,6 +12,8 @@ Terraform resource for managing an AWS EventBridge Pipes Pipe. You can find out more about EventBridge Pipes in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html). +EventBridge Pipes are very configurable, and may require IAM permissions to work correctly. More information on the configuration options and IAM permissions can be found in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html). + ~> **Note:** EventBridge was formerly known as CloudWatch Events. The functionality is identical. 
## Example Usage @@ -87,9 +89,91 @@ resource "aws_pipes_pipe" "example" { role_arn = aws_iam_role.example.arn source = aws_sqs_queue.source.arn target = aws_sqs_queue.target.arn +} +``` + +### Enrichment Usage + +```terraform +resource "aws_pipes_pipe" "example" { + name = "example-pipe" + role_arn = aws_iam_role.example.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.example.arn + + enrichment_parameters { + http_parameters { + header { + key = "example-header" + value = "example-value" + } + + header { + key = "second-example-header" + value = "second-example-value" + } + + path_parameters = ["example-path-param"] - source_parameters {} - target_parameters {} + query_string { + key = "example-query-string" + value = "example-value" + } + + query_string { + key = "second-example-query-string" + value = "second-example-value" + } + } + } +} +``` + +### Filter Usage + +```terraform +resource "aws_pipes_pipe" "example" { + name = "example-pipe" + role_arn = aws_iam_role.example.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + source_parameters { + filter_criteria { + filter { + pattern = jsonencode({ + source = ["event-source"] + }) + } + } + } +} +``` + +### SQS Source and Target Configuration Usage + +```terraform +resource "aws_pipes_pipe" "example" { + name = "example-pipe" + role_arn = aws_iam_role.example.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + source_parameters { + sqs_queue { + batch_size = 1 + maximum_batching_window_in_seconds = 2 + } + } + + target_parameters { + sqs_queue { + message_deduplication_id = "example-dedupe" + message_group_id = "example-group" + } + } } ``` @@ -100,21 +184,54 @@ The following arguments are required: * `role_arn` - (Required) ARN of the role that allows the pipe to send data to the target. * `source` - (Required) Source resource of the pipe (typically an ARN). * `target` - (Required) Target resource of the pipe (typically an ARN). -* `source_parameters` - (Required) Parameters required to set up a source for the pipe. Detailed below. -* `target_parameters` - (Required) Parameters required to set up a target for your pipe. Detailed below. The following arguments are optional: * `description` - (Optional) A description of the pipe. At most 512 characters. * `desired_state` - (Optional) The state the pipe should be in. One of: `RUNNING`, `STOPPED`. * `enrichment` - (Optional) Enrichment resource of the pipe (typically an ARN). Read more about enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-enrichment). +* `enrichment_parameters` - (Optional) Parameters to configure enrichment for your pipe. Detailed below. * `name` - (Optional) Name of the pipe. If omitted, Terraform will assign a random, unique name. Conflicts with `name_prefix`. * `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`. +* `source_parameters` - (Optional) Parameters to configure a source for the pipe. Detailed below. +* `target_parameters` - (Optional) Parameters to configure a target for your pipe. Detailed below. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
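+
+As a brief illustration of the optional arguments above, the following sketch creates a pipe with a generated name that starts in the `STOPPED` state (the role and queue references are assumed to be defined elsewhere in the configuration):
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name_prefix   = "example-"
+  desired_state = "STOPPED"
+  role_arn      = aws_iam_role.example.arn
+  source        = aws_sqs_queue.source.arn
+  target        = aws_sqs_queue.target.arn
+}
+```
+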
+### enrichment_parameters Configuration Block
+
+You can find out more about EventBridge Pipes Enrichment in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/pipes-enrichment.html).
+
+* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters.
+* `http_parameters` - (Optional) Contains the HTTP parameters to use when the target is an API Gateway REST endpoint or EventBridge ApiDestination. If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can use this parameter to specify headers, path parameters, and query string keys/values as part of the request invoking your target. If you're using ApiDestinations, the corresponding Connection can also have these values configured. In case of any conflicting keys, values from the Connection take precedence. Detailed below.
+
+#### enrichment_parameters.http_parameters Configuration Block
+
+* `header` - (Optional) The headers that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
+* `path_parameters` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
+* `query_string` - (Optional) The query strings that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
+
+##### enrichment_parameters.http_parameters.header Configuration Block
+
+* `key` - (Optional) The name of the header. Maximum length of 512 characters.
+* `value` - (Optional) The header value. Maximum length of 512 characters.
+
+##### enrichment_parameters.http_parameters.query_string Configuration Block
+
+* `key` - (Optional) The name of the query string. Maximum length of 512 characters.
+* `value` - (Optional) The value of the query string. Maximum length of 512 characters.
+
 ### source_parameters Configuration Block
 
-* `filter_criteria` - (Optional) The collection of event patterns used to filter events. Detailed below.
+You can find out more about EventBridge Pipes Sources in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-source.html).
+
+* `active_mq_broker` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below.
+* `dynamo_db_stream` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below.
+* `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below.
+* `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below.
+* `managed_streaming_kafka` - (Optional) The parameters for using an MSK stream as a source. Detailed below.
+* `rabbit_mq_broker` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below.
+* `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below.
+* `sqs_queue` - (Optional) The parameters for using an Amazon SQS queue as a source. Detailed below.
 
 #### source_parameters.filter_criteria Configuration Block
 
@@ -124,9 +241,322 @@ The following arguments are optional:
 
 * `pattern` - (Required) The event pattern. At most 4096 characters.
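+
+Multiple `filter` blocks combine as a union: events matching any of the listed patterns are delivered. A sketch with two illustrative event sources:
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_sqs_queue.target.arn
+
+  source_parameters {
+    filter_criteria {
+      filter {
+        pattern = jsonencode({
+          source = ["service-one"]
+        })
+      }
+
+      filter {
+        pattern = jsonencode({
+          source = ["service-two"]
+        })
+      }
+    }
+  }
+}
+```
+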
+#### source_parameters.active_mq_broker Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `credentials` - (Required) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `queue` - (Required) The name of the destination queue to consume. Maximum length of 1000.
+
+##### source_parameters.active_mq_broker.credentials Configuration Block
+
+* `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the basic auth credentials.
+
+#### source_parameters.dynamo_db_stream Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `maximum_record_age_in_seconds` - (Optional) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records. Maximum value of 604,800.
+* `maximum_retry_attempts` - (Optional) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source. Maximum value of 10,000.
+* `on_partial_batch_item_failure` - (Optional) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retries each half until all the records are processed or there is one failed message left in the batch. Valid values: AUTOMATIC_BISECT.
+* `parallelization_factor` - (Optional) The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+
+##### source_parameters.dynamo_db_stream.dead_letter_config Configuration Block
+
+* `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue.
+
+#### source_parameters.kinesis_stream Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `maximum_record_age_in_seconds` - (Optional) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records. Maximum value of 604,800.
+* `maximum_retry_attempts` - (Optional) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source. Maximum value of 10,000.
+* `on_partial_batch_item_failure` - (Optional) Define how to handle item process failures. AUTOMATIC_BISECT halves each batch and retries each half until all the records are processed or there is one failed message left in the batch. Valid values: AUTOMATIC_BISECT.
+* `parallelization_factor` - (Optional) The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10.
+* `starting_position` - (Required) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST, AT_TIMESTAMP.
+* `starting_position_timestamp` - (Optional) With StartingPosition set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds.
+
+##### source_parameters.kinesis_stream.dead_letter_config Configuration Block
+
+* `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue.
+
+#### source_parameters.managed_streaming_kafka Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `consumer_group_id` - (Optional) The ID of the consumer group to use. Maximum length of 200.
+* `credentials` - (Optional) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+* `topic` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
+
+##### source_parameters.managed_streaming_kafka.credentials Configuration Block
+
+* `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+
+#### source_parameters.rabbit_mq_broker Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `credentials` - (Required) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `queue` - (Required) The name of the destination queue to consume. Maximum length of 1000.
+* `virtual_host` - (Optional) The name of the virtual host associated with the source broker. Maximum length of 200.
+
+##### source_parameters.rabbit_mq_broker.credentials Configuration Block
+
+* `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the credentials.
+
+#### source_parameters.self_managed_kafka Configuration Block
+
+* `servers` - (Optional) An array of server URLs. Maximum number of 2 items, each of maximum length 300.
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `consumer_group_id` - (Optional) The ID of the consumer group to use. Maximum length of 200.
+* `credentials` - (Optional) The credentials needed to access the resource. Detailed below.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+* `server_root_ca_certificate` - (Optional) The ARN of the Secrets Manager secret used for certification.
+* `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
+* `topic` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
+* `vpc` - (Optional) This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used. Detailed below.
+
+##### source_parameters.self_managed_kafka.credentials Configuration Block
+
+* `basic_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_256_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+* `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
+
+##### source_parameters.self_managed_kafka.vpc Configuration Block
+
+* `security_groups` - (Optional) List of security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.
+* `subnets` - (Optional) List of the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.
+
+#### source_parameters.sqs_queue Configuration Block
+
+* `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
+* `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
+
 ### target_parameters Configuration Block
 
-* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target.
+You can find out more about EventBridge Pipes Targets in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-target.html).
+
+* `batch_target` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below.
+* `cloudwatch_logs` - (Optional) The parameters for using a CloudWatch Logs log stream as a target. Detailed below.
+* `ecs_task` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below.
+* `event_bridge_event_bus` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below.
+* `http_parameters` - (Optional) These are custom parameters to be used when the target is an API Gateway REST API or EventBridge ApiDestination. Detailed below.
+* `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters.
+* `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a target. Detailed below.
+* `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below.
+* `redshift_data` - (Optional) These are custom parameters to be used when the target is an Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below.
+* `sage_maker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below.
+* `sqs_queue` - (Optional) The parameters for using an Amazon SQS queue as a target. Detailed below.
+* `step_function` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below.
+
+#### target_parameters.batch_target Configuration Block
+
+* `array_properties` - (Optional) The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job. Detailed below.
+* `container_overrides` - (Optional) The overrides that are sent to a container. Detailed below.
+* `depends_on` - (Optional) A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a SEQUENTIAL type dependency without specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an N_TO_N type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each dependency to complete before it can begin. Detailed below.
+* `job_definition` - (Required) The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision, then the latest active revision is used.
+* `job_name` - (Required) The name of the job. It can be up to 128 letters long.
+* `parameters` - (Optional) Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition. Detailed below.
+* `retry_strategy` - (Optional) The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. Detailed below.
+
+##### target_parameters.batch_target.array_properties Configuration Block
+
+* `size` - (Optional) The size of the array, if this is an array batch job. Minimum value of 2. Maximum value of 10,000.
+
+##### target_parameters.batch_target.container_overrides Configuration Block
+
+* `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition.
+* `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. Environment variables cannot start with "AWS_BATCH". This naming convention is reserved for variables that AWS Batch sets. Detailed below.
+* `instance_type` - (Optional) The instance type to use for a multi-node parallel job. This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.
+* `resource_requirements` - (Optional) The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU. Detailed below.
+
+###### target_parameters.batch_target.container_overrides.environment Configuration Block
+
+* `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable.
+* `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable.
+
+###### target_parameters.batch_target.container_overrides.resource_requirements Configuration Block
+
+* `type` - (Optional) The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.
+* `value` - (Optional) The quantity of the specified resource to reserve for the container. [The values vary based on the type specified](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchResourceRequirement.html).
+
+##### target_parameters.batch_target.depends_on Configuration Block
+
+* `job_id` - (Optional) The job ID of the AWS Batch job that's associated with this dependency.
+* `type` - (Optional) The type of the job dependency. Valid Values: N_TO_N, SEQUENTIAL.
+
+##### target_parameters.batch_target.parameters Configuration Block
+
+* `key` - (Optional) The name of the parameter.
+* `value` - (Optional) The value of the parameter.
+
+##### target_parameters.batch_target.retry_strategy Configuration Block
+
+* `attempts` - (Optional) The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. Maximum value of 10.
+
+#### target_parameters.cloudwatch_logs Configuration Block
+
+* `log_stream_name` - (Optional) The name of the log stream.
+* `timestamp` - (Optional) The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. This is the JSON path to the field in the event, e.g. `$.detail.timestamp`.
+
+#### target_parameters.ecs_task Configuration Block
+
+* `capacity_provider_strategy` - (Optional) List of capacity provider strategies to use for the task. If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used. Detailed below.
+* `enable_ecs_managed_tags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the task. Valid values: true, false.
+* `enable_execute_command` - (Optional) Whether or not to enable the execute command functionality for the containers in this task. If true, this enables execute command functionality on all containers in the task. Valid values: true, false.
+* `group` - (Optional) Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.
+* `launch_type` - (Optional) Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. Valid Values: EC2, FARGATE, EXTERNAL.
+* `network_configuration` - (Optional) Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks. If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails. Detailed below.
+* `overrides` - (Optional) The overrides that are associated with a task. Detailed below.
+* `placement_constraints` - (Optional) An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). Detailed below.
+* `placement_strategy` - (Optional) The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. Detailed below.
+* `platform_version` - (Optional) Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This structure is used only if LaunchType is FARGATE. +* `propagate_tags` - (Optional) Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action. Valid Values: TASK_DEFINITION +* `reference_id` - (Optional) The reference ID to use for the task. Maximum length of 1,024. +* `tags` - (Optional) The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Detailed below. +* `task_count` - (Optional) The number of tasks to create based on TaskDefinition. The default is 1. +* `task_definition_arn` - (Optional) The ARN of the task definition to use if the event target is an Amazon ECS task. + +##### target_parameters.ecs_task.capacity_provider_strategy Configuration Block + +* `base` - (Optional) The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used. Maximum value of 100,000. +* `capacity_provider` - (Optional) The short name of the capacity provider. Maximum value of 255. +* `weight` - (Optional) The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Maximum value of 1,000. + +##### target_parameters.ecs_task.network_configuration Configuration Block + +* `aws_vpc_configuration` - (Optional) Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode. Detailed below. + +###### target_parameters.ecs_task.network_configuration.aws_vpc_configuration Configuration Block + +* `assign_public_ip` - (Optional) Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE. Valid Values: ENABLED, DISABLED. +* `security_groups` - (Optional) Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. +* `subnets` - (Optional) Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets. + +##### target_parameters.ecs_task.overrides Configuration Block + +* `container_overrides` - (Optional) One or more container overrides that are sent to a task. Detailed below. +* `cpu` - (Optional) The cpu override for the task. +* `ecs_ephemeral_storage` - (Optional) The ephemeral storage setting override for the task. Detailed below. +* `execution_role_arn` - (Optional) The Amazon Resource Name (ARN) of the task execution IAM role override for the task. +* `inference_accelerator_overrides` - (Optional) List of Elastic Inference accelerator overrides for the task. Detailed below. 
+* `memory` - (Optional) The memory override for the task. +* `task_role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role. + +###### target_parameters.ecs_task.overrides.container_overrides Configuration Block + +* `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name. +* `cpu` - (Optional) The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name. +* `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name. Detailed below. +* `environment_files` - (Optional) A list of files containing the environment variables to pass to a container, instead of the value from the container definition. Detailed below. +* `memory` - (Optional) The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name. +* `memory_reservation` - (Optional) The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name. +* `name` - (Optional) The name of the container that receives the override. This parameter is required if any override is specified. +* `resource_requirements` - (Optional) The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU. Detailed below. + +###### target_parameters.ecs_task.overrides.container_overrides.environment Configuration Block + +* `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable. +* `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable. + +###### target_parameters.ecs_task.overrides.container_overrides.environment_files Configuration Block + +* `type` - (Optional) The file type to use. The only supported value is s3. +* `value` - (Optional) The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. + +###### target_parameters.ecs_task.overrides.container_overrides.resource_requirements Configuration Block + +* `type` - (Optional) The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator. +* `value` - (Optional) The value for the specified resource type. If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the InferenceAccelerator type is used, the value matches the deviceName for an InferenceAccelerator specified in a task definition. 
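+
+Putting the override blocks above together, a minimal sketch of a container override might look like the following (the ECS cluster target, task definition reference, and container name are assumptions for illustration):
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_ecs_cluster.example.arn
+
+  target_parameters {
+    ecs_task {
+      task_definition_arn = aws_ecs_task_definition.example.arn
+
+      overrides {
+        container_overrides {
+          name   = "app"
+          cpu    = 256
+          memory = 512
+
+          environment {
+            name  = "STAGE"
+            value = "production"
+          }
+        }
+      }
+    }
+  }
+}
+```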
+
+###### target_parameters.ecs_task.overrides.ecs_ephemeral_storage Configuration Block
+
+* `size_in_gib` - (Required) The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB.
+
+###### target_parameters.ecs_task.overrides.inference_accelerator_overrides Configuration Block
+
+* `device_name` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition.
+* `device_type` - (Optional) The Elastic Inference accelerator type to use.
+
+##### target_parameters.ecs_task.placement_constraints Configuration Block
+
+* `expression` - (Optional) A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. Maximum length of 2,000.
+* `type` - (Optional) The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. Valid Values: distinctInstance, memberOf.
+
+##### target_parameters.ecs_task.placement_strategy Configuration Block
+
+* `field` - (Optional) The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. Maximum length of 255.
+* `type` - (Optional) The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task). Valid Values: random, spread, binpack.
+
+##### target_parameters.ecs_task.tags Configuration Block
+
+* `key` - (Optional) A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources. Maximum length of 128.
+* `value` - (Optional) The value for the specified tag key. Maximum length of 256.
+
+#### target_parameters.event_bridge_event_bus Configuration Block
+
+* `detail_type` - (Optional) A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.
+* `endpoint_id` - (Optional) The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.
+* `resources` - (Optional) List of AWS resources, identified by Amazon Resource Name (ARN), which the event primarily concerns. Any number, including zero, may be present.
+* `source` - (Optional) The source of the event. Maximum length of 256.
+* `time` - (Optional) The time stamp of the event, per RFC3339. If no time stamp is provided, the time stamp of the PutEvents call is used. This is the JSON path to the field in the event, e.g. `$.detail.timestamp`.
+
+#### target_parameters.http_parameters Configuration Block
+
+* `header` - (Optional) The headers that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
+* `path_parameters` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
+* `query_string` - (Optional) The query strings that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
+
+##### target_parameters.http_parameters.header Configuration Block
+
+* `key` - (Optional) The name of the header. Maximum length of 512 characters.
+* `value` - (Optional) The header value. Maximum length of 512 characters.
+
+##### target_parameters.http_parameters.query_string Configuration Block
+
+* `key` - (Optional) The name of the query string. Maximum length of 512 characters.
+* `value` - (Optional) The value of the query string. Maximum length of 512 characters.
+
+#### target_parameters.kinesis_stream Configuration Block
+
+* `partition_key` - (Required) Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream.
+
+#### target_parameters.lambda_function Configuration Block
+
+* `invocation_type` - (Optional) Specify whether to invoke the function synchronously or asynchronously. Valid Values: REQUEST_RESPONSE, FIRE_AND_FORGET.
+
+#### target_parameters.redshift_data Configuration Block
+
+* `database` - (Required) The name of the database. Required when authenticating using temporary credentials.
+* `database_user` - (Optional) The database user name. Required when authenticating using temporary credentials.
+* `secret_manager_arn` - (Optional) The name or ARN of the secret that enables access to the database. Required when authenticating using Secrets Manager.
+* `sqls` - (Optional) List of SQL statements to run, each with a maximum length of 100,000.
+* `statement_name` - (Optional) The name of the SQL statement. You can name the SQL statement when you create it to identify the query.
+* `with_event` - (Optional) Indicates whether to send an event back to EventBridge after the SQL statement runs.
+
+#### target_parameters.sage_maker_pipeline Configuration Block
+
+* `parameters` - (Optional) List of Parameter names and values for SageMaker Model Building Pipeline execution. Detailed below.
+
+##### target_parameters.sage_maker_pipeline.parameters Configuration Block
+
+* `name` - (Optional) Name of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 256.
+* `value` - (Optional) Value of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 1024.
+
+#### target_parameters.sqs_queue Configuration Block
+
+* `message_deduplication_id` - (Optional) This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages.
+* `message_group_id` - (Optional) The FIFO message group ID to use as the target.
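+
+As a sketch of one of the simpler target blocks above, a Lambda target invoked asynchronously (the function and queue references are illustrative):
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_lambda_function.example.arn
+
+  target_parameters {
+    lambda_function {
+      invocation_type = "FIRE_AND_FORGET"
+    }
+  }
+}
+```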
+
+#### target_parameters.step_function Configuration Block
+
+* `invocation_type` - (Optional) Specify whether to invoke the Step Functions state machine synchronously or asynchronously. Valid Values: REQUEST_RESPONSE, FIRE_AND_FORGET.
 
 ## Attributes Reference
 

From e80db56be5a6c46a63a60dad5822090c56154fdd Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Fri, 9 Jun 2023 12:40:21 -0400
Subject: [PATCH 02/65] r/aws_pipes_pipe: Alphabetize attributes.

---
 internal/service/pipes/pipe.go | 103 +++++++++++----------------------
 1 file changed, 34 insertions(+), 69 deletions(-)

diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go
index e6a4e125d7f..9eac4ef7963 100644
--- a/internal/service/pipes/pipe.go
+++ b/internal/service/pipes/pipe.go
@@ -2,7 +2,6 @@ package pipes
 
 import (
 	"context"
-	"errors"
 	"log"
 	"regexp"
 	"time"
@@ -66,6 +65,7 @@ func ResourcePipe() *schema.Resource {
 				Optional:     true,
 				ValidateFunc: verify.ValidARN,
 			},
+			"enrichment_parameters": enrichment_parameters_schema,
 			"name": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -102,16 +102,15 @@
 					validation.StringMatch(regexp.MustCompile(`^smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1})?:(\d{12})?:(.+)$`), ""),
 				),
 			},
+			"source_parameters": source_parameters_schema,
 			"target": {
 				Type:         schema.TypeString,
 				Required:     true,
 				ValidateFunc: verify.ValidARN,
 			},
-			names.AttrTags:    tftags.TagsSchema(),
-			names.AttrTagsAll: tftags.TagsSchemaComputed(),
-			"source_parameters": source_parameters_schema,
-			"target_parameters": target_parameters_schema,
-			"enrichment_parameters": enrichment_parameters_schema,
+			"target_parameters": target_parameters_schema,
+			names.AttrTags:    tftags.TagsSchema(),
+			names.AttrTagsAll: tftags.TagsSchemaComputed(),
 		},
 	}
 }
@@ -133,6 +132,14 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf
 		Target: aws.String(d.Get("target").(string)),
 	}
 
+	if v, ok := d.GetOk("description"); ok {
+		input.Description = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("enrichment"); ok && v != "" {
+		input.Enrichment = aws.String(v.(string))
+	}
+
 	if v, ok := d.GetOk("enrichment_parameters"); ok {
 		input.EnrichmentParameters = expandEnrichmentParameters(v.([]interface{}))
 	}
@@ -145,23 +152,12 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf
 		input.TargetParameters = expandTargetParameters(v.([]interface{}))
 	}
 
-	if v, ok := d.GetOk("description"); ok {
-		input.Description = aws.String(v.(string))
-	}
-
-	if v, ok := d.GetOk("enrichment"); ok && v != "" {
-		input.Enrichment = aws.String(v.(string))
-	}
-
 	output, err := conn.CreatePipe(ctx, input)
+
 	if err != nil {
 		return create.DiagError(names.Pipes, create.ErrActionCreating, ResNamePipe, name, err)
 	}
 
-	if output == nil || output.Arn == nil {
-		return create.DiagError(names.Pipes, create.ErrActionCreating, ResNamePipe, name, errors.New("empty output"))
-	}
-
 	d.SetId(aws.ToString(output.Name))
 
 	if _, err := waitPipeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
@@ -190,37 +186,19 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac
 	d.Set("description", output.Description)
 	d.Set("desired_state", output.DesiredState)
 	d.Set("enrichment", output.Enrichment)
+	if err := d.Set("enrichment_parameters", flattenEnrichmentParameters(output.EnrichmentParameters)); err != nil {
+		return
diag.Errorf("setting enrichment_parameters: %s", err) + } d.Set("name", output.Name) d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) d.Set("role_arn", output.RoleArn) d.Set("source", output.Source) - d.Set("target", output.Target) - - if output.SourceParameters != nil { - params := flattenSourceParameters(output.SourceParameters) - if params != nil { - if err := d.Set("source_parameters", params); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) - } - } - } - - if output.EnrichmentParameters != nil { - params := flattenEnrichmentParameters(output.EnrichmentParameters) - if params != nil { - if err := d.Set("enrichment_parameters", params); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) - } - } + if err := d.Set("source_parameters", flattenSourceParameters(output.SourceParameters)); err != nil { + return diag.Errorf("setting source_parameters: %s", err) } - - if output.TargetParameters != nil { - params := flattenTargetParameters(output.TargetParameters) - if params != nil { - if err := d.Set("target_parameters", params); err != nil { - return create.DiagError(names.Pipes, create.ErrActionSetting, ResNamePipe, d.Id(), err) - } - } + d.Set("target", output.Target) + if err := d.Set("target_parameters", flattenTargetParameters(output.TargetParameters)); err != nil { + return diag.Errorf("setting target_parameters: %s", err) } return nil @@ -235,29 +213,21 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(d.Id()), RoleArn: aws.String(d.Get("role_arn").(string)), - Target: aws.String(d.Get("target").(string)), - } - - if d.HasChange("enrichment") { // Reset state in case it's a deletion. - input.Enrichment = nil - } - - if d.HasChange("enrichment_parameters") { - // Reset state in case it's a deletion. - input.EnrichmentParameters = nil - } - - // Reset state in case it's a deletion. - input.SourceParameters = &types.UpdatePipeSourceParameters{ - FilterCriteria: &types.FilterCriteria{ - Filters: nil, + SourceParameters: &types.UpdatePipeSourceParameters{ + FilterCriteria: &types.FilterCriteria{}, + }, + Target: aws.String(d.Get("target").(string)), + // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. + TargetParameters: &types.PipeTargetParameters{ + InputTemplate: aws.String(""), }, } - // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. 
- input.TargetParameters = &types.PipeTargetParameters{ - InputTemplate: aws.String(""), + if d.HasChange("enrichment") { + if v, ok := d.GetOk("enrichment"); ok && v.(string) != "" { + input.Enrichment = aws.String(v.(string)) + } } if v, ok := d.GetOk("enrichment_parameters"); ok { @@ -272,13 +242,8 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf input.TargetParameters = expandTargetParameters(v.([]interface{})) } - if v, ok := d.GetOk("enrichment"); ok && v.(string) != "" { - input.Enrichment = aws.String(v.(string)) - } - - log.Printf("[DEBUG] Updating EventBridge Pipes Pipe (%s): %#v", d.Id(), input) - output, err := conn.UpdatePipe(ctx, input) + if err != nil { return create.DiagError(names.Pipes, create.ErrActionUpdating, ResNamePipe, d.Id(), err) } From 76d73ea779bafb89a884032284e3f6e723bc1e05 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 12:49:52 -0400 Subject: [PATCH 03/65] Add CHANGELOG entry. --- .changelog/31607.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/31607.txt diff --git a/.changelog/31607.txt b/.changelog/31607.txt new file mode 100644 index 00000000000..fc3d2b1cad4 --- /dev/null +++ b/.changelog/31607.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_pipes_pipe: Add `enrichment_parameters` argument +``` \ No newline at end of file From cc443f8d100f14d3b0c04fbdaf30a12db6f42e62 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 13:27:24 -0400 Subject: [PATCH 04/65] Tidy up 'enrichment_parameters'. --- .../service/pipes/enrichment_parameters.go | 108 +++++++++--------- internal/service/pipes/pipe.go | 2 +- 2 files changed, 56 insertions(+), 54 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go index 90a75e65786..5fa340833dd 100644 --- a/internal/service/pipes/enrichment_parameters.go +++ b/internal/service/pipes/enrichment_parameters.go @@ -8,71 +8,73 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -var enrichment_parameters_schema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "input_template": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 8192), - }, - "http_parameters": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "header": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), +func enrichmentParametersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, }, }, }, - }, - 
"path_parameters": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + "path_parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, - }, - "query_string": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), + "query_string": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, }, }, }, }, }, }, + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), + }, }, }, - }, + } } func expandEnrichmentParameters(config []interface{}) *types.PipeEnrichmentParameters { diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 9eac4ef7963..e25c589e721 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -65,7 +65,7 @@ func ResourcePipe() *schema.Resource { Optional: true, ValidateFunc: verify.ValidARN, }, - "enrichment_parameters": enrichment_parameters_schema, + "enrichment_parameters": enrichmentParametersSchema(), "name": { Type: schema.TypeString, Optional: true, From bc29dd0c37db45db3095bd8ec5db09f310214435 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 13:39:13 -0400 Subject: [PATCH 05/65] Tidy up 'source_parameters'. 
--- .changelog/31607.txt | 4 + internal/service/pipes/pipe.go | 2 +- internal/service/pipes/source_parameters.go | 1088 ++++++++++--------- 3 files changed, 550 insertions(+), 544 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index fc3d2b1cad4..690ca32f4c3 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -1,3 +1,7 @@ ```release-note:enhancement resource/aws_pipes_pipe: Add `enrichment_parameters` argument +``` + +```release-note:enhancement +resource/aws_pipes_pipe: Add `active_mq_broker`, `dynamo_db_stream`, `kinesis_stream`, `managed_streaming_kafka`, `rabbit_mq_broker`, `self_managed_kafka` and `sqs_queue` attributes to the `source_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index e25c589e721..8f30167bcd5 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -102,7 +102,7 @@ func ResourcePipe() *schema.Resource { validation.StringMatch(regexp.MustCompile(`^smk://(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\-]+):([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1})?:(\d{12})?:(.+)$`), ""), ), }, - "source_parameters": source_parameters_schema, + "source_parameters": sourceParametersSchema(), "target": { Type: schema.TypeString, Required: true, diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index b3fe00cd1e2..28b4f226b0f 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -13,538 +13,564 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -var verifySecretsManagerARN = validation.StringMatch(regexp.MustCompile(`^(^arn:aws([a-z]|\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1}):(\d{12}):secret:.+)$`), "") - -var source_parameters_schema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "active_mq_broker": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.kinesis_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" +func sourceParametersSchema() *schema.Schema { + verifySecretsManagerARN := validation.StringMatch(regexp.MustCompile(`^(^arn:aws([a-z]|\-)*:secretsmanager:([a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\d{1}):(\d{12}):secret:.+)$`), "") + + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "active_mq_broker": { + Type: schema.TypeList, + 
Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.kinesis_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.self_managed_kafka", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, }, - }, - "queue": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1000), - validation.StringMatch(regexp.MustCompile(`^[\s\S]*$`), ""), - ), - }, - "credentials": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "basic_auth": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verifySecretsManagerARN, + "credentials": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "basic_auth": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verifySecretsManagerARN, + }, }, }, }, + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, + }, + "queue": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1000), + validation.StringMatch(regexp.MustCompile(`^[\s\S]*$`), ""), + ), + }, }, }, }, - }, - "dynamo_db_stream": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.kinesis_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "starting_position": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.DynamoDBStreamStartPosition](), - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, - }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" + "dynamo_db_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.kinesis_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.self_managed_kafka", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: 
validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, }, - }, - "maximum_record_age_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.Any( - validation.IntInSlice([]int{-1}), - validation.IntBetween(60, 604_800), - ), - }, - "maximum_retry_attempts": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(-1, 10_000), - }, - "parallelization_factor": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10), - Default: 1, - }, - "on_partial_batch_item_failure": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.OnPartialBatchItemFailureStreams](), - }, - "dead_letter_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, + "dead_letter_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, + }, + "maximum_record_age_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.Any( + validation.IntInSlice([]int{-1}), + validation.IntBetween(60, 604_800), + ), + }, + "maximum_retry_attempts": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(-1, 10_000), + }, + "on_partial_batch_item_failure": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.OnPartialBatchItemFailureStreams](), + }, + "parallelization_factor": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + Default: 1, + }, + "starting_position": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.DynamoDBStreamStartPosition](), + }, }, }, }, - }, - "kinesis_stream": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "starting_position": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KinesisStreamStartPosition](), - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" + "filter_criteria": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeList, + Required: true, + 
MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pattern": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 4096), + }, + }, + }, }, }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" + }, + }, + "kinesis_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.self_managed_kafka", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, }, - }, - "maximum_record_age_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.Any( - validation.IntInSlice([]int{-1}), - validation.IntBetween(60, 604_800), - ), - }, - "parallelization_factor": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10), - Default: 1, - }, - "maximum_retry_attempts": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(-1, 10_000), - }, - "on_partial_batch_item_failure": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.OnPartialBatchItemFailureStreams](), - }, - "dead_letter_config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, + "dead_letter_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "starting_position_timestamp": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.IsRFC3339Time, + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, + }, + "maximum_record_age_in_seconds": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.Any( + validation.IntInSlice([]int{-1}), + validation.IntBetween(60, 604_800), + ), + }, + "maximum_retry_attempts": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(-1, 10_000), + }, + "on_partial_batch_item_failure": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.OnPartialBatchItemFailureStreams](), + }, + "parallelization_factor": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), + Default: 1, + }, + "starting_position": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: 
enum.Validate[types.KinesisStreamStartPosition](), + }, + "starting_position_timestamp": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IsRFC3339Time, + }, }, }, }, - }, - "managed_streaming_kafka": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.kinesis_stream", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_certificate_tls_auth": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verifySecretsManagerARN, - }, - "sasl_scram_512_auth": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verifySecretsManagerARN, + "managed_streaming_kafka": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.kinesis_stream", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.self_managed_kafka", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, + }, + "consumer_group_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 200), + validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), + ), + }, + "credentials": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate_tls_auth": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verifySecretsManagerARN, + }, + "sasl_scram_512_auth": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verifySecretsManagerARN, + }, }, }, }, - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, }, - }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" + "starting_position": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.MSKStartPosition](), + }, + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 249), + validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), + ), 
}, - }, - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 249), - validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), - ), - }, - "consumer_group_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 200), - validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), - ), - }, - "starting_position": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.MSKStartPosition](), }, }, }, - }, - "rabbit_mq_broker": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.kinesis_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "basic_auth": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verifySecretsManagerARN, + "rabbit_mq_broker": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.kinesis_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.self_managed_kafka", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, + }, + "credentials": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "basic_auth": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verifySecretsManagerARN, + }, }, }, }, - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, }, - }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" + "queue": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1000), + validation.StringMatch(regexp.MustCompile(`^[\s\S]*$`), ""), + ), + }, + "virtual_host": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 200), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-\/*:_+=.@-]*$`), ""), + ), }, - 
}, - "queue": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1000), - validation.StringMatch(regexp.MustCompile(`^[\s\S]*$`), ""), - ), - }, - "virtual_host": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 200), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-\/*:_+=.@-]*$`), ""), - ), }, }, }, - }, - "self_managed_kafka": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.kinesis_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "basic_auth": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verifySecretsManagerARN, - }, - "client_certificate_tls_auth": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verifySecretsManagerARN, - }, - "sasl_scram_256_auth": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verifySecretsManagerARN, - }, - "sasl_scram_512_auth": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verifySecretsManagerARN, + "self_managed_kafka": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.kinesis_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "100" + }, + }, + "consumer_group_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 200), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-\/*:_+=.@-]*$`), ""), + ), + }, + "credentials": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "basic_auth": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verifySecretsManagerARN, + }, + "client_certificate_tls_auth": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verifySecretsManagerARN, + }, + "sasl_scram_256_auth": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verifySecretsManagerARN, + }, + "sasl_scram_512_auth": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verifySecretsManagerARN, + }, }, }, }, - }, - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new 
!= "" && new != "0" { + return false + } + return old == "0" + }, }, - }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" + "server_root_ca_certificate": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, }, - }, - "topic": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 249), - validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), - ), - }, - "consumer_group_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 200), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9-\/*:_+=.@-]*$`), ""), - ), - }, - "starting_position": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.SelfManagedKafkaStartPosition](), - }, - "server_root_ca_certificate": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, - }, - "servers": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - MaxItems: 2, - Elem: &schema.Schema{ - Type: schema.TypeString, + "servers": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 300), + validation.StringMatch(regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}$`), ""), + ), + }, + }, + "starting_position": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.SelfManagedKafkaStartPosition](), + }, + "topic": { + Type: schema.TypeString, + Required: true, + ForceNew: true, ValidateFunc: validation.All( - validation.StringLenBetween(1, 300), - validation.StringMatch(regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}$`), ""), + validation.StringLenBetween(1, 249), + validation.StringMatch(regexp.MustCompile(`^[^.]([a-zA-Z0-9\-_.]+)$`), ""), ), }, - }, - "vpc": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "security_groups": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 5, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1024), - validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*$`), ""), - ), + "vpc": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*$`), ""), + ), + }, }, - }, - "subnets": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 16, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1024), - validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*$`), ""), - ), + "subnets": { + Type: schema.TypeSet, + Optional: true, 
+ MaxItems: 16, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*$`), ""), + ), + }, }, }, }, @@ -552,65 +578,41 @@ var source_parameters_schema = &schema.Schema{ }, }, }, - }, - "sqs_queue": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", - "source_parameters.0.kinesis_stream", - "source_parameters.0.managed_streaming_kafka", - "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.self_managed_kafka", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "10" - }, - }, - "maximum_batching_window_in_seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, - }, + "sqs_queue": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + ConflictsWith: []string{ + "source_parameters.0.active_mq_broker", + "source_parameters.0.dynamo_db_stream", + "source_parameters.0.kinesis_stream", + "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.self_managed_kafka", }, - }, - }, - "filter_criteria": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "filter": { - Type: schema.TypeList, - Required: true, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "pattern": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 4096), - }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10000), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "10" + }, + }, + "maximum_batching_window_in_seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 300), + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" }, }, }, @@ -618,7 +620,7 @@ var source_parameters_schema = &schema.Schema{ }, }, }, - }, + } } func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { From 24b9f39729e6fb76c258c6ffbc2f7ab232368e6c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:04:58 -0400 Subject: [PATCH 06/65] Tidy up 'target_parameters'. 
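
As with `source_parameters`, the package-level `target_parameters_schema`
variable becomes a `targetParametersSchema()` constructor function and the
attributes inside each block are sorted alphabetically; behaviour is
unchanged. A minimal sketch of the pattern, assuming the same plugin SDK
used elsewhere in this package (the name `exampleParametersSchema` and the
`example_attribute` field are illustrative only, not part of this patch):

    package pipes

    import (
        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    )

    // Building the schema in a function rather than a package-level var
    // keeps helpers such as shared validators scoped to the constructor,
    // instead of leaking them as package globals.
    func exampleParametersSchema() *schema.Schema {
        return &schema.Schema{
            Type:     schema.TypeList,
            Optional: true,
            MaxItems: 1,
            Elem: &schema.Resource{
                Schema: map[string]*schema.Schema{
                    // Attributes are kept in alphabetical order.
                    "example_attribute": {
                        Type:     schema.TypeString,
                        Optional: true,
                    },
                },
            },
        }
    }

The resource schema then calls the constructor directly, as in the
`pipe.go` hunk below: `"target_parameters": targetParametersSchema(),`.
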
--- .changelog/31607.txt | 4 + internal/service/pipes/pipe.go | 2 +- internal/service/pipes/target_parameters.go | 1520 ++++++++++--------- 3 files changed, 766 insertions(+), 760 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 690ca32f4c3..18bc8f2636e 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -4,4 +4,8 @@ resource/aws_pipes_pipe: Add `enrichment_parameters` argument ```release-note:enhancement resource/aws_pipes_pipe: Add `active_mq_broker`, `dynamo_db_stream`, `kinesis_stream`, `managed_streaming_kafka`, `rabbit_mq_broker`, `self_managed_kafka` and `sqs_queue` attributes to the `source_parameters` configuration block +``` + +```release-note:enhancement +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sage_maker_pipeline`, `sqs_queue` and `step_function` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 8f30167bcd5..8a31cb57f3f 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -108,7 +108,7 @@ func ResourcePipe() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, - "target_parameters": target_parameters_schema, + "target_parameters": targetParametersSchema(), names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index a1aa97c5e82..162247877fc 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -12,149 +12,151 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -var target_parameters_schema = &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_target": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "job_definition": { - Type: schema.TypeString, - Required: true, - }, - "job_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "retry_strategy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attempts": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(1, 10), +func targetParametersSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_target": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + 
"target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "array_properties": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(2, 10000), + }, }, }, }, - }, - "array_properties": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "size": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(2, 10000), + "container_overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "instance_type": { + Type: schema.TypeString, + Optional: true, + }, + "resource_requirements": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.BatchResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, }, }, }, - }, - "parameters": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, + "depends_on": { + Type: schema.TypeList, + Optional: true, + MaxItems: 20, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.BatchJobDependencyType](), + }, }, }, }, - }, - "depends_on": { - Type: schema.TypeList, - Optional: true, - MaxItems: 20, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "job_id": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.BatchJobDependencyType](), - }, - }, + "job_definition": { + Type: schema.TypeString, + Required: true, }, - }, - "container_overrides": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "command": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + "job_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Optional: true, }, - }, - "environment": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: 
true, - }, - }, + "value": { + Type: schema.TypeString, + Optional: true, }, }, - "instance_type": { - Type: schema.TypeString, - Optional: true, - }, - "resource_requirements": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.BatchResourceRequirementType](), - }, - "value": { - Type: schema.TypeString, - Required: true, - }, - }, + }, + }, + "retry_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attempts": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 10), }, }, }, @@ -162,709 +164,709 @@ var target_parameters_schema = &schema.Schema{ }, }, }, - }, - "cloudwatch_logs": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "log_stream_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 256), - }, - "timestamp": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 256), - validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), - ), - }, + "cloudwatch_logs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", }, - }, - }, - "ecs_task": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "task_definition_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "capacity_provider_strategy": { - Type: schema.TypeList, - Optional: true, - MaxItems: 6, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "capacity_provider": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 255), - }, - "base": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100000), - Default: 0, - }, - "weight": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 1000), - Default: 0, - }, - }, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "log_stream_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 256), + }, + "timestamp": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), }, }, - "enable_ecs_managed_tags": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "enable_execute_command": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "group": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 255), - }, - "launch_type": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.LaunchType](), - }, - "network_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "aws_vpc_configuration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "assign_public_ip": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.AssignPublicIp](), - }, - "security_groups": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 5, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1024), - validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), - ), - }, - }, - "subnets": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 16, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 1024), - validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), - ), - }, - }, - }, + }, + }, + "ecs_task": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_provider_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 6, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "capacity_provider": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "base": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100000), + Default: 0, + }, + "weight": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 1000), + Default: 0, }, }, }, }, - }, - "placement_constraints": { - Type: schema.TypeList, - Optional: true, - MaxItems: 10, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "expression": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 2000), - }, - "type": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.PlacementConstraintType](), - }, - }, + "enable_ecs_managed_tags": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, - }, - "placement_strategy": { - 
Type: schema.TypeList, - Optional: true, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 255), - }, - "type": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.PlacementStrategyType](), - }, - }, + "enable_execute_command": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, - }, - "platform_version": { - Type: schema.TypeString, - Optional: true, - }, - "propagate_tags": { - Type: schema.TypeString, - Optional: true, - ValidateDiagFunc: enum.Validate[types.PropagateTags](), - }, - "reference_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1024), - }, - "task_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, - }, - "tags": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 256), - }, - }, + "group": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), }, - }, - "overrides": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeString, - Optional: true, - }, - "memory": { - Type: schema.TypeString, - Optional: true, - }, - "execution_role_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, - }, - "task_role_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, - }, - "inference_accelerator_overrides": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "device_name": { - Type: schema.TypeString, - Optional: true, - }, - "device_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "ecs_ephemeral_storage": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "size_in_gib": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(21, 200), + "launch_type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.LaunchType](), + }, + "network_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "aws_vpc_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "assign_public_ip": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AssignPublicIp](), + }, + "security_groups": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^sg-[0-9a-zA-Z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + }, + "subnets": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 16, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 1024), + validation.StringMatch(regexp.MustCompile(`^subnet-[0-9a-z]*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), 
""), + ), + }, + }, }, }, }, }, - "container_overrides": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cpu": { - Type: schema.TypeInt, - Optional: true, - }, - "memory": { - Type: schema.TypeInt, - Optional: true, - }, - "memory_reservation": { - Type: schema.TypeInt, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "command": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + }, + }, + "overrides": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_overrides": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, - }, - "environment": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, + "cpu": { + Type: schema.TypeInt, + Optional: true, + }, + "environment": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Optional: true, + }, }, }, }, - }, - "environment_files": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.EcsEnvironmentFileType](), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "environment_files": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsEnvironmentFileType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "resource_requirements": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.EcsResourceRequirementType](), - }, - "value": { - Type: schema.TypeString, - Required: true, + "memory": { + Type: schema.TypeInt, + Optional: true, + }, + "memory_reservation": { + Type: schema.TypeInt, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "resource_requirements": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.EcsResourceRequirementType](), + }, + "value": { + Type: schema.TypeString, + Required: true, + }, }, }, }, }, }, }, + "cpu": { + Type: schema.TypeString, + Optional: true, + }, + "ecs_ephemeral_storage": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size_in_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(21, 200), + }, + }, + }, + }, + "execution_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + 
"inference_accelerator_overrides": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Optional: true, + }, + "device_type": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "memory": { + Type: schema.TypeString, + Optional: true, + }, + "task_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, + "placement_constraints": { + Type: schema.TypeList, + Optional: true, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementConstraintType](), + }, + }, + }, + }, + "placement_strategy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + "type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PlacementStrategyType](), + }, }, }, }, + "platform_version": { + Type: schema.TypeString, + Optional: true, + }, + "propagate_tags": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.PropagateTags](), + }, + "reference_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "task_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "task_definition_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "event_bridge_event_bus": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "detail_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 128), - }, - "endpoint_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 50), - validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9\-]+[\.][A-Za-z0-9\-]+$`), ""), - ), - }, - "resources": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 10, - Elem: &schema.Schema{ + "event_bridge_event_bus": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + 
"target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "detail_type": { Type: schema.TypeString, - ValidateFunc: verify.ValidARN, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 128), + }, + "endpoint_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 50), + validation.StringMatch(regexp.MustCompile(`^[A-Za-z0-9\-]+[\.][A-Za-z0-9\-]+$`), ""), + ), + }, + "resources": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 10, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, + }, + "source": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + ), + }, + "time": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), + ), }, - }, - "source": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 256), - ), - }, - "time": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 256), - validation.StringMatch(regexp.MustCompile(`^\$(\.[\w/_-]+(\[(\d+|\*)\])*)*$`), ""), - ), }, }, }, - }, - "http_parameters": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "header": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), + "http_parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, }, }, }, - }, - "path_parameters": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + 
"path_parameters": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, - }, - "query_string": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), + "query_string": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 512), + }, }, }, }, }, }, }, - }, - "input_template": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 8192), - }, - "kinesis_stream": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "input_template": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 8192), }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "partition_key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 256), + "kinesis_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, }, }, }, - }, - "lambda_function": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "invocation_type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + "lambda_function": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + 
"target_parameters.0.kinesis_stream", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, }, }, }, - }, - "redshift_data": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "database": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 64), - }, - "sqls": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ + "redshift_data": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(1, 100000), + Required: true, + ValidateFunc: validation.StringLenBetween(1, 64), + }, + "database_user": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + "secret_manager_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + "statement_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, + "sqls": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 100000), + }, + }, + "with_event": { + Type: schema.TypeBool, + Optional: true, + Default: false, }, - }, - "database_user": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "secret_manager_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, - }, - "statement_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 500), - }, - "with_event": { - Type: schema.TypeBool, - Optional: true, - Default: false, }, }, }, - }, - "sage_maker_pipeline": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - 
Schema: map[string]*schema.Schema{ - "parameters": { - Type: schema.TypeList, - Optional: true, - MaxItems: 200, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 256), - validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), - ), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 1024), + "sage_maker_pipeline": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sqs_queue", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "parameters": { + Type: schema.TypeList, + Optional: true, + MaxItems: 200, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 256), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9](-*[a-zA-Z0-9])*|(\$(\.[\w/_-]+(\[(\d+|\*)\])*)*)$`), ""), + ), + }, + "value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 1024), + }, }, }, }, }, }, }, - }, - "sqs_queue": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.step_function", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message_deduplication_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 100), - }, - "message_group_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 100), + "sqs_queue": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.step_function", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message_deduplication_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + "message_group_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, }, }, }, - }, - "step_function": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - ConflictsWith: []string{ - "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", - 
"target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", - "target_parameters.0.sqs_queue", - }, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "invocation_type": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + "step_function": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{ + "target_parameters.0.batch_target", + "target_parameters.0.cloudwatch_logs", + "target_parameters.0.ecs_task", + "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.http_parameters", + "target_parameters.0.kinesis_stream", + "target_parameters.0.lambda_function", + "target_parameters.0.redshift_data", + "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sqs_queue", + }, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invocation_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.PipeTargetInvocationType](), + }, }, }, }, }, }, - }, + } } func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { From 322961305a181ff538a722773278ceb7ccd5f8ec Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:06:47 -0400 Subject: [PATCH 07/65] 'sage_maker_pipeline' -> 'sagemaker_pipeline'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 26 +++++++++---------- .../service/pipes/target_parameters_test.go | 8 +++--- website/docs/r/pipes_pipe.html.markdown | 6 ++--- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 18bc8f2636e..9f41fdcdf29 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `active_mq_broker`, `dynamo_db_stream`, `kinesis_st ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sage_maker_pipeline`, `sqs_queue` and `step_function` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue` and `step_function` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 162247877fc..5177fe8f703 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -31,7 +31,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -176,7 +176,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", 
"target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -210,7 +210,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -543,7 +543,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -601,7 +601,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -670,7 +670,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -696,7 +696,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -722,7 +722,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", "target_parameters.0.step_function", }, @@ -764,7 +764,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "sage_maker_pipeline": { + "sagemaker_pipeline": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -820,7 +820,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.step_function", }, Elem: &schema.Resource{ @@ -851,7 +851,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sage_maker_pipeline", + "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", }, Elem: &schema.Resource{ @@ -917,7 +917,7 @@ func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { parameters.RedshiftDataParameters = expandTargetRedshiftDataParameters(val.([]interface{})) } - if val, ok := param["sage_maker_pipeline"]; ok { + if val, ok := param["sagemaker_pipeline"]; ok { parameters.SageMakerPipelineParameters = expandTargetSageMakerPipelineParameters(val.([]interface{})) } @@ -1622,7 +1622,7 @@ func flattenTargetParameters(targetParameters *types.PipeTargetParameters) []map } if targetParameters.SageMakerPipelineParameters != nil { - 
config["sage_maker_pipeline"] = flattenTargetSageMakerPipelineParameters(targetParameters.SageMakerPipelineParameters) + config["sagemaker_pipeline"] = flattenTargetSageMakerPipelineParameters(targetParameters.SageMakerPipelineParameters) } if targetParameters.SqsQueueParameters != nil { diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go index 869c0ab20bf..a525f9fa262 100644 --- a/internal/service/pipes/target_parameters_test.go +++ b/internal/service/pipes/target_parameters_test.go @@ -467,9 +467,9 @@ func Test_expandTargetParameters(t *testing.T) { }, }, }, - "sage_maker_pipeline config": { + "sagemaker_pipeline config": { config: map[string]interface{}{ - "sage_maker_pipeline": []interface{}{ + "sagemaker_pipeline": []interface{}{ map[string]interface{}{ "parameters": []interface{}{ map[string]interface{}{ @@ -1021,10 +1021,10 @@ func Test_flattenTargetParameters(t *testing.T) { }, }, }, - "sage_maker_pipeline config": { + "sagemaker_pipeline config": { expected: []map[string]interface{}{ { - "sage_maker_pipeline": []map[string]interface{}{ + "sagemaker_pipeline": []map[string]interface{}{ { "parameters": []map[string]interface{}{ { diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index d240c6f7736..4436a0dbb87 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -351,7 +351,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below. * `redshift_data` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. -* `sage_maker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. +* `sagemaker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. * `sqs_queue` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below. * `step_function` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. @@ -540,11 +540,11 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `statement_name` - (Optional) The name of the SQL statement. You can name the SQL statement when you create it to identify the query. * `with_event` - (Optional) Indicates whether to send an event back to EventBridge after the SQL statement runs. -#### target_parameters.sage_maker_pipeline Configuration Block +#### target_parameters.sagemaker_pipeline Configuration Block * `parameters` - (Optional) List of Parameter names and values for SageMaker Model Building Pipeline execution. Detailed below. -##### target_parameters.sage_maker_pipeline.parameters Configuration Block +##### target_parameters.sagemaker_pipeline.parameters Configuration Block * `name` - (Optional) Name of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 256. * `value` - (Optional) Value of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 1024. 
From 3d5cef3b8b928e499eae76f8e40fbaae6c35b6ac Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:36:06 -0400 Subject: [PATCH 08/65] pipes: Use 'go-cmp' in unit tests. --- .../service/pipes/enrichment_parameters_test.go | 10 +++++++--- internal/service/pipes/source_parameters_test.go | 14 ++++++++++---- internal/service/pipes/target_parameters.go | 4 ++-- internal/service/pipes/target_parameters_test.go | 10 +++++++--- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go index 8b3d80af451..bab249bfbb9 100644 --- a/internal/service/pipes/enrichment_parameters_test.go +++ b/internal/service/pipes/enrichment_parameters_test.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" - "github.com/stretchr/testify/assert" + "github.com/google/go-cmp/cmp" ) func Test_expandEnrichmentParameters(t *testing.T) { @@ -68,7 +68,9 @@ func Test_expandEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandEnrichmentParameters([]interface{}{tt.config}) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } @@ -137,7 +139,9 @@ func Test_flattenEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenEnrichmentParameters(tt.config) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go index 611e859364a..f926d2a9087 100644 --- a/internal/service/pipes/source_parameters_test.go +++ b/internal/service/pipes/source_parameters_test.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_expandSourceParameters(t *testing.T) { @@ -544,7 +544,9 @@ func Test_expandSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceParameters([]interface{}{tt.config}) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } @@ -1023,7 +1025,9 @@ func Test_flattenSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenSourceParameters(tt.config) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } @@ -1494,7 +1498,9 @@ func Test_expandSourceUpdateParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceUpdateParameters([]interface{}{tt.config}) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 5177fe8f703..92f8a263a38 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -1693,7 +1693,7 @@ func flattenTargetBatchContainerOverrides(parameters *types.BatchContainerOverri config := make(map[string]interface{}) if parameters.Command != nil { - 
config["command"] = flex.FlattenStringValueSet(parameters.Command) + config["command"] = flex.FlattenStringValueList(parameters.Command) } if parameters.InstanceType != nil { config["instance_type"] = aws.ToString(parameters.InstanceType) @@ -1891,7 +1891,7 @@ func flattenTargetECSTaskOverrideContainerOverride(parameters types.EcsContainer config["name"] = aws.ToString(parameters.Name) } if parameters.Command != nil { - config["command"] = flex.FlattenStringValueSet(parameters.Command) + config["command"] = flex.FlattenStringValueList(parameters.Command) } var environmentValues []map[string]interface{} diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go index a525f9fa262..71ea43ffaaf 100644 --- a/internal/service/pipes/target_parameters_test.go +++ b/internal/service/pipes/target_parameters_test.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_expandTargetParameters(t *testing.T) { @@ -542,7 +542,9 @@ func Test_expandTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandTargetParameters([]interface{}{tt.config}) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } @@ -1104,7 +1106,9 @@ func Test_flattenTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenTargetParameters(tt.config) - assert.Equal(t, tt.expected, got) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } }) } } From 270783e602e09c1802d19c10097d1b9b3c8781a3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:48:43 -0400 Subject: [PATCH 09/65] pipes: Use 'reflect.DeepEquals' in unit tests. 
--- .../pipes/enrichment_parameters_test.go | 16 +++++++++---- .../service/pipes/source_parameters_test.go | 23 +++++++++++++------ .../service/pipes/target_parameters_test.go | 16 +++++++++---- 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go index bab249bfbb9..96166ff4bc6 100644 --- a/internal/service/pipes/enrichment_parameters_test.go +++ b/internal/service/pipes/enrichment_parameters_test.go @@ -1,11 +1,11 @@ package pipes import ( + "reflect" "testing" "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" - "github.com/google/go-cmp/cmp" ) func Test_expandEnrichmentParameters(t *testing.T) { @@ -68,8 +68,11 @@ func Test_expandEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandEnrichmentParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } @@ -139,8 +142,11 @@ func Test_flattenEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenEnrichmentParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go index f926d2a9087..539720510fe 100644 --- a/internal/service/pipes/source_parameters_test.go +++ b/internal/service/pipes/source_parameters_test.go @@ -1,12 +1,12 @@ package pipes import ( + "reflect" "testing" "time" "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" - "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -544,8 +544,11 @@ func Test_expandSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } @@ -1025,8 +1028,11 @@ func Test_flattenSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenSourceParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } @@ -1498,8 +1504,11 @@ func Test_expandSourceUpdateParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceUpdateParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go index 71ea43ffaaf..89ea473a34f 100644 --- a/internal/service/pipes/target_parameters_test.go +++ b/internal/service/pipes/target_parameters_test.go @@ -1,11 +1,11 @@ package pipes import ( + "reflect" 
"testing" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -542,8 +542,11 @@ func Test_expandTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandTargetParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } @@ -1106,8 +1109,11 @@ func Test_flattenTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenTargetParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !reflect.DeepEqual(got, tt.expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + got, + tt.expected) } }) } From 10324c77c471a1250f6c0c9f8700ce165c333048 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:49:17 -0400 Subject: [PATCH 10/65] Revert "pipes: Use 'reflect.DeepEquals' in unit tests." This reverts commit 270783e602e09c1802d19c10097d1b9b3c8781a3. --- .../pipes/enrichment_parameters_test.go | 16 ++++--------- .../service/pipes/source_parameters_test.go | 23 ++++++------------- .../service/pipes/target_parameters_test.go | 16 ++++--------- 3 files changed, 17 insertions(+), 38 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go index 96166ff4bc6..bab249bfbb9 100644 --- a/internal/service/pipes/enrichment_parameters_test.go +++ b/internal/service/pipes/enrichment_parameters_test.go @@ -1,11 +1,11 @@ package pipes import ( - "reflect" "testing" "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" + "github.com/google/go-cmp/cmp" ) func Test_expandEnrichmentParameters(t *testing.T) { @@ -68,11 +68,8 @@ func Test_expandEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandEnrichmentParameters([]interface{}{tt.config}) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } @@ -142,11 +139,8 @@ func Test_flattenEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenEnrichmentParameters(tt.config) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go index 539720510fe..f926d2a9087 100644 --- a/internal/service/pipes/source_parameters_test.go +++ b/internal/service/pipes/source_parameters_test.go @@ -1,12 +1,12 @@ package pipes import ( - "reflect" "testing" "time" "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -544,11 +544,8 @@ func Test_expandSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceParameters([]interface{}{tt.config}) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - 
tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } @@ -1028,11 +1025,8 @@ func Test_flattenSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenSourceParameters(tt.config) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } @@ -1504,11 +1498,8 @@ func Test_expandSourceUpdateParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceUpdateParameters([]interface{}{tt.config}) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go index 89ea473a34f..71ea43ffaaf 100644 --- a/internal/service/pipes/target_parameters_test.go +++ b/internal/service/pipes/target_parameters_test.go @@ -1,11 +1,11 @@ package pipes import ( - "reflect" "testing" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes/types" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -542,11 +542,8 @@ func Test_expandTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandTargetParameters([]interface{}{tt.config}) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } @@ -1109,11 +1106,8 @@ func Test_flattenTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenTargetParameters(tt.config) - if !reflect.DeepEqual(got, tt.expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - got, - tt.expected) + if diff := cmp.Diff(got, tt.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } From bc9e9a3b20b59a72960743664bb5eac3834a6c59 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 14:50:38 -0400 Subject: [PATCH 11/65] Revert "pipes: Use 'go-cmp' in unit tests." This reverts commit 3d5cef3b8b928e499eae76f8e40fbaae6c35b6ac. 
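
After both reverts the tests are back on testify's `assert.Equal`, whose argument order is (t, expected, actual). A minimal sketch of the restored style, using the same imports and expander the pipes unit tests use (the test name and template value are illustrative):

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/stretchr/testify/assert"
)

func Test_assertionStyleSketch(t *testing.T) {
	expected := &types.PipeEnrichmentParameters{
		InputTemplate: aws.String("some template"),
	}
	got := expandEnrichmentParameters([]interface{}{
		map[string]interface{}{"input_template": "some template"},
	})

	// assert.Equal prints an object diff on failure, unlike a bare
	// reflect.DeepEqual check.
	assert.Equal(t, expected, got)
}
```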
--- .../service/pipes/enrichment_parameters_test.go | 10 +++------- internal/service/pipes/source_parameters_test.go | 14 ++++---------- internal/service/pipes/target_parameters.go | 4 ++-- internal/service/pipes/target_parameters_test.go | 10 +++------- 4 files changed, 12 insertions(+), 26 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go index bab249bfbb9..8b3d80af451 100644 --- a/internal/service/pipes/enrichment_parameters_test.go +++ b/internal/service/pipes/enrichment_parameters_test.go @@ -5,7 +5,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" - "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" ) func Test_expandEnrichmentParameters(t *testing.T) { @@ -68,9 +68,7 @@ func Test_expandEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandEnrichmentParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } @@ -139,9 +137,7 @@ func Test_flattenEnrichmentParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenEnrichmentParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go index f926d2a9087..611e859364a 100644 --- a/internal/service/pipes/source_parameters_test.go +++ b/internal/service/pipes/source_parameters_test.go @@ -6,8 +6,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/aws/aws-sdk-go/aws" - "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" ) func Test_expandSourceParameters(t *testing.T) { @@ -544,9 +544,7 @@ func Test_expandSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } @@ -1025,9 +1023,7 @@ func Test_flattenSourceParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenSourceParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } @@ -1498,9 +1494,7 @@ func Test_expandSourceUpdateParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandSourceUpdateParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 92f8a263a38..5177fe8f703 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -1693,7 +1693,7 @@ func flattenTargetBatchContainerOverrides(parameters *types.BatchContainerOverri config := make(map[string]interface{}) if parameters.Command != nil { - config["command"] = flex.FlattenStringValueList(parameters.Command) + config["command"] = flex.FlattenStringValueSet(parameters.Command) } if parameters.InstanceType != nil { 
config["instance_type"] = aws.ToString(parameters.InstanceType) @@ -1891,7 +1891,7 @@ func flattenTargetECSTaskOverrideContainerOverride(parameters types.EcsContainer config["name"] = aws.ToString(parameters.Name) } if parameters.Command != nil { - config["command"] = flex.FlattenStringValueList(parameters.Command) + config["command"] = flex.FlattenStringValueSet(parameters.Command) } var environmentValues []map[string]interface{} diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go index 71ea43ffaaf..a525f9fa262 100644 --- a/internal/service/pipes/target_parameters_test.go +++ b/internal/service/pipes/target_parameters_test.go @@ -5,8 +5,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/stretchr/testify/assert" ) func Test_expandTargetParameters(t *testing.T) { @@ -542,9 +542,7 @@ func Test_expandTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := expandTargetParameters([]interface{}{tt.config}) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } @@ -1106,9 +1104,7 @@ func Test_flattenTargetParameters(t *testing.T) { t.Run(name, func(t *testing.T) { got := flattenTargetParameters(tt.config) - if diff := cmp.Diff(got, tt.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } + assert.Equal(t, tt.expected, got) }) } } From e93fbbab87d72c9719d6f0f83fdb05b91a83d384 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 9 Jun 2023 17:18:09 -0400 Subject: [PATCH 12/65] r/aws_pipes_pipe: 'enrichment_parameters.http_parameters' changes. 
--- .../service/pipes/enrichment_parameters.go | 185 +- .../pipes/enrichment_parameters_test.go | 143 -- internal/service/pipes/pipe.go | 48 +- internal/service/pipes/pipe_test.go | 12 +- internal/service/pipes/source_parameters.go | 36 + .../service/pipes/source_parameters_test.go | 1500 ----------------- internal/service/pipes/target_parameters.go | 24 + .../service/pipes/target_parameters_test.go | 1110 ------------ website/docs/r/pipes_pipe.html.markdown | 44 +- 9 files changed, 171 insertions(+), 2931 deletions(-) delete mode 100644 internal/service/pipes/enrichment_parameters_test.go delete mode 100644 internal/service/pipes/source_parameters_test.go delete mode 100644 internal/service/pipes/target_parameters_test.go diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go index 5fa340833dd..292cf825799 100644 --- a/internal/service/pipes/enrichment_parameters.go +++ b/internal/service/pipes/enrichment_parameters.go @@ -21,48 +21,22 @@ func enrichmentParametersSchema() *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "header": { - Type: schema.TypeList, + "header_parameters": { + Type: schema.TypeMap, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "path_parameters": { + "path_parameter_values": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, }, }, - "query_string": { - Type: schema.TypeList, + "query_string_parameters": { + Type: schema.TypeMap, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, }, }, }, @@ -77,111 +51,82 @@ func enrichmentParametersSchema() *schema.Schema { } } -func expandEnrichmentParameters(config []interface{}) *types.PipeEnrichmentParameters { - if len(config) == 0 { +func expandPipeEnrichmentParameters(tfMap map[string]interface{}) *types.PipeEnrichmentParameters { + if tfMap == nil { return nil } - var parameters types.PipeEnrichmentParameters - for _, c := range config { - param := c.(map[string]interface{}) - if val, ok := param["input_template"].(string); ok && val != "" { - parameters.InputTemplate = aws.String(val) - } - if val, ok := param["http_parameters"]; ok { - parameters.HttpParameters = expandEnrichmentHTTPParameters(val.([]interface{})) - } + apiObject := &types.PipeEnrichmentParameters{} + + if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.HttpParameters = expandPipeEnrichmentHttpParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["input_template"].(string); ok && v != "" { + apiObject.InputTemplate = aws.String(v) } - return ¶meters + + return apiObject } -func expandEnrichmentHTTPParameters(config []interface{}) *types.PipeEnrichmentHttpParameters { - if len(config) == 0 { +func expandPipeEnrichmentHttpParameters(tfMap map[string]interface{}) *types.PipeEnrichmentHttpParameters { + if tfMap == nil { return nil } 
- var parameters types.PipeEnrichmentHttpParameters - for _, c := range config { - param := c.(map[string]interface{}) - if val, ok := param["path_parameters"]; ok { - parameters.PathParameterValues = flex.ExpandStringValueList(val.([]interface{})) - } - - if val, ok := param["header"]; ok { - headers := map[string]string{} - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if key, ok := valueParam["key"].(string); ok && key != "" { - if value, ok := valueParam["value"].(string); ok && value != "" { - headers[key] = value - } - } - } - } - if len(headers) > 0 { - parameters.HeaderParameters = headers - } - } - - if val, ok := param["query_string"]; ok { - queryStrings := map[string]string{} - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if key, ok := valueParam["key"].(string); ok && key != "" { - if value, ok := valueParam["value"].(string); ok && value != "" { - queryStrings[key] = value - } - } - } - } - if len(queryStrings) > 0 { - parameters.QueryStringParameters = queryStrings - } - } + apiObject := &types.PipeEnrichmentHttpParameters{} + + if v, ok := tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.HeaderParameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { + apiObject.PathParameterValues = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) } - return ¶meters + + return apiObject } -func flattenEnrichmentParameters(enrichmentParameters *types.PipeEnrichmentParameters) []map[string]interface{} { - config := make(map[string]interface{}) +func flattenPipeEnrichmentParameters(apiObject *types.PipeEnrichmentParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} - if enrichmentParameters.InputTemplate != nil { - config["input_template"] = *enrichmentParameters.InputTemplate + if v := apiObject.HttpParameters; v != nil { + tfMap["http_parameters"] = []interface{}{flattenPipeEnrichmentHttpParameters(v)} } - if enrichmentParameters.HttpParameters != nil { - httpParameters := make(map[string]interface{}) - - var headerParameters []map[string]interface{} - for key, value := range enrichmentParameters.HttpParameters.HeaderParameters { - header := make(map[string]interface{}) - header["key"] = key - header["value"] = value - headerParameters = append(headerParameters, header) - } - httpParameters["header"] = headerParameters - - var queryStringParameters []map[string]interface{} - for key, value := range enrichmentParameters.HttpParameters.QueryStringParameters { - queryString := make(map[string]interface{}) - queryString["key"] = key - queryString["value"] = value - queryStringParameters = append(queryStringParameters, queryString) - } - httpParameters["query_string"] = queryStringParameters - httpParameters["path_parameters"] = flex.FlattenStringValueList(enrichmentParameters.HttpParameters.PathParameterValues) - - config["http_parameters"] = []map[string]interface{}{httpParameters} + if v := apiObject.InputTemplate; v != nil { + tfMap["input_template"] = aws.ToString(v) } - if len(config) == 0 { + return tfMap +} + +func flattenPipeEnrichmentHttpParameters(apiObject *types.PipeEnrichmentHttpParameters) map[string]interface{} { + if apiObject 
== nil { return nil } - result := []map[string]interface{}{config} - return result + tfMap := map[string]interface{}{} + + if v := apiObject.HeaderParameters; v != nil { + tfMap["header_parameters"] = v + } + + if v := apiObject.PathParameterValues; v != nil { + tfMap["path_parameter_values"] = v + } + + if v := apiObject.QueryStringParameters; v != nil { + tfMap["query_string_parameters"] = v + } + + return tfMap } diff --git a/internal/service/pipes/enrichment_parameters_test.go b/internal/service/pipes/enrichment_parameters_test.go deleted file mode 100644 index 8b3d80af451..00000000000 --- a/internal/service/pipes/enrichment_parameters_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package pipes - -import ( - "testing" - - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/stretchr/testify/assert" -) - -func Test_expandEnrichmentParameters(t *testing.T) { - tests := map[string]struct { - config map[string]interface{} - expected *types.PipeEnrichmentParameters - }{ - "input_template config": { - config: map[string]interface{}{ - "input_template": "some template", - }, - expected: &types.PipeEnrichmentParameters{ - InputTemplate: aws.String("some template"), - }, - }, - "http_parameters config": { - config: map[string]interface{}{ - "http_parameters": []interface{}{ - map[string]interface{}{ - "path_parameters": []interface{}{"a", "b"}, - "header": []interface{}{ - map[string]interface{}{ - "key": "key1", - "value": "value1", - }, - map[string]interface{}{ - "key": "key2", - "value": "value2", - }, - }, - "query_string": []interface{}{ - map[string]interface{}{ - "key": "key3", - "value": "value3", - }, - map[string]interface{}{ - "key": "key4", - "value": "value4", - }, - }, - }, - }, - }, - expected: &types.PipeEnrichmentParameters{ - HttpParameters: &types.PipeEnrichmentHttpParameters{ - PathParameterValues: []string{"a", "b"}, - HeaderParameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - QueryStringParameters: map[string]string{ - "key3": "value3", - "key4": "value4", - }, - }, - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := expandEnrichmentParameters([]interface{}{tt.config}) - - assert.Equal(t, tt.expected, got) - }) - } -} - -func Test_flattenEnrichmentParameters(t *testing.T) { - tests := map[string]struct { - config *types.PipeEnrichmentParameters - expected []map[string]interface{} - }{ - "input_template config": { - config: &types.PipeEnrichmentParameters{ - InputTemplate: aws.String("some template"), - }, - expected: []map[string]interface{}{ - { - "input_template": "some template", - }, - }, - }, - "http_parameters config": { - config: &types.PipeEnrichmentParameters{ - HttpParameters: &types.PipeEnrichmentHttpParameters{ - PathParameterValues: []string{"a", "b"}, - HeaderParameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - QueryStringParameters: map[string]string{ - "key3": "value3", - "key4": "value4", - }, - }, - }, - expected: []map[string]interface{}{ - { - "http_parameters": []map[string]interface{}{ - { - "path_parameters": []interface{}{"a", "b"}, - "header": []map[string]interface{}{ - { - "key": "key1", - "value": "value1", - }, - { - "key": "key2", - "value": "value2", - }, - }, - "query_string": []map[string]interface{}{ - { - "key": "key3", - "value": "value3", - }, - { - "key": "key4", - "value": "value4", - }, - }, - }, - }, - }, - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := 
flattenEnrichmentParameters(tt.config) - - assert.Equal(t, tt.expected, got) - }) - } -} diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 8a31cb57f3f..11030410397 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -140,16 +140,16 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf input.Enrichment = aws.String(v.(string)) } - if v, ok := d.GetOk("enrichment_parameters"); ok { - input.EnrichmentParameters = expandEnrichmentParameters(v.([]interface{})) + if v, ok := d.GetOk("enrichment_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.EnrichmentParameters = expandPipeEnrichmentParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.GetOk("source_parameters"); ok { - input.SourceParameters = expandSourceParameters(v.([]interface{})) + if v, ok := d.GetOk("source_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.SourceParameters = expandPipeSourceParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.GetOk("target_parameters"); ok { - input.TargetParameters = expandTargetParameters(v.([]interface{})) + if v, ok := d.GetOk("target_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.TargetParameters = expandPipeTargetParameters(v.([]interface{})[0].(map[string]interface{})) } output, err := conn.CreatePipe(ctx, input) @@ -186,19 +186,31 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("description", output.Description) d.Set("desired_state", output.DesiredState) d.Set("enrichment", output.Enrichment) - if err := d.Set("enrichment_parameters", flattenEnrichmentParameters(output.EnrichmentParameters)); err != nil { - return diag.Errorf("setting enrichment_parameters: %s", err) + if output.EnrichmentParameters != nil { + if err := d.Set("enrichment_parameters", []interface{}{flattenPipeEnrichmentParameters(output.EnrichmentParameters)}); err != nil { + return diag.Errorf("setting enrichment_parameters: %s", err) + } + } else { + d.Set("enrichment_parameters", nil) } d.Set("name", output.Name) d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) d.Set("role_arn", output.RoleArn) d.Set("source", output.Source) - if err := d.Set("source_parameters", flattenSourceParameters(output.SourceParameters)); err != nil { - return diag.Errorf("setting source_parameters: %s", err) + if output.SourceParameters != nil { + if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(output.SourceParameters)}); err != nil { + return diag.Errorf("setting source_parameters: %s", err) + } + } else { + d.Set("source_parameters", nil) } d.Set("target", output.Target) - if err := d.Set("target_parameters", flattenTargetParameters(output.TargetParameters)); err != nil { - return diag.Errorf("setting target_parameters: %s", err) + if output.TargetParameters != nil { + if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(output.TargetParameters)}); err != nil { + return diag.Errorf("setting target_parameters: %s", err) + } + } else { + d.Set("target_parameters", nil) } return nil @@ -230,16 +242,16 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf } } - if v, ok := d.GetOk("enrichment_parameters"); ok { - input.EnrichmentParameters = expandEnrichmentParameters(v.([]interface{})) + if v, ok := d.GetOk("enrichment_parameters"); ok && 
len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.EnrichmentParameters = expandPipeEnrichmentParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.GetOk("source_parameters"); ok { - input.SourceParameters = expandSourceUpdateParameters(v.([]interface{})) + if v, ok := d.GetOk("source_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.SourceParameters = expandUpdatePipeSourceParameters(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := d.GetOk("target_parameters"); ok { - input.TargetParameters = expandTargetParameters(v.([]interface{})) + if v, ok := d.GetOk("target_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.TargetParameters = expandPipeTargetParameters(v.([]interface{})[0].(map[string]interface{})) } output, err := conn.UpdatePipe(ctx, input) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 498e6b885cd..11552523083 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -2537,16 +2537,14 @@ resource "aws_pipes_pipe" "test" { enrichment_parameters { http_parameters { - header { - key = %[3]q - value = %[4]q + header_parameters = { + %[3]q = %[4]q } - path_parameters = ["parameter1"] + path_parameter_values = ["parameter1"] - query_string { - key = %[5]q - value = %[6]q + query_string_parameters = { + %[5]q = %[6]q } } } diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 28b4f226b0f..ac446ca6ab2 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -623,6 +623,42 @@ func sourceParametersSchema() *schema.Schema { } } +func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceParameters{} + + // ... nested attribute handling ... + + return apiObject +} + +func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceParameters{} + + // ... nested attribute handling ... + + return apiObject +} + +func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + // ... nested attribute handling ... 
+ + return tfMap +} + func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { if len(config) == 0 { return nil diff --git a/internal/service/pipes/source_parameters_test.go b/internal/service/pipes/source_parameters_test.go deleted file mode 100644 index 611e859364a..00000000000 --- a/internal/service/pipes/source_parameters_test.go +++ /dev/null @@ -1,1500 +0,0 @@ -package pipes - -import ( - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_expandSourceParameters(t *testing.T) { - tests := map[string]struct { - config map[string]interface{} - expected *types.PipeSourceParameters - }{ - "active_mq_broker config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "queue": "test", - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", - }, - }, - }, - }, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - ActiveMQBrokerParameters: &types.PipeSourceActiveMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - QueueName: aws.String("test"), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "dynamo_db_stream config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{ - map[string]interface{}{ - "starting_position": "LATEST", - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - "on_partial_batch_item_failure": "AUTOMATIC_BISECT", - "dead_letter_config": []interface{}{ - map[string]interface{}{ - "arn": "arn:queue", - }, - }, - }, - }, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - DynamoDBStreamParameters: &types.PipeSourceDynamoDBStreamParameters{ - StartingPosition: "LATEST", - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "kinesis_stream config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{ - map[string]interface{}{ - "starting_position": "AT_TIMESTAMP", - "starting_position_timestamp": "2020-01-01T00:00:00Z", - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - 
"on_partial_batch_item_failure": "AUTOMATIC_BISECT", - "dead_letter_config": []interface{}{ - map[string]interface{}{ - "arn": "arn:queue", - }, - }, - }, - }, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - KinesisStreamParameters: &types.PipeSourceKinesisStreamParameters{ - StartingPosition: "AT_TIMESTAMP", - StartingPositionTimestamp: aws.Time(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "managed_streaming_kafka config with client_certificate_tls_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "credentials": []interface{}{ - map[string]interface{}{ - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - Credentials: &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "managed_streaming_kafka config with sasl_scram_512_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "credentials": []interface{}{ - map[string]interface{}{ - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{ - map[string]interface{}{ - 
"batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "queue": "test", - "virtual_host": "hosting", - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - RabbitMQBrokerParameters: &types.PipeSourceRabbitMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - QueueName: aws.String("test"), - VirtualHost: aws.String("hosting"), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with basic_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with client_certificate_tls_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - 
"sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_512_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_256_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": "LATEST", - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ 
- "sasl_scram_256_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "sqs_queue config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - }, - }, - "filter_criteria": []interface{}{}, - }, - expected: &types.PipeSourceParameters{ - SqsQueueParameters: &types.PipeSourceSqsQueueParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - }, - }, - }, - "filter_criteria config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{ - map[string]interface{}{ - "filter": []interface{}{ - map[string]interface{}{ - "pattern": "1", - }, - map[string]interface{}{ - "pattern": "2", - }, - }, - }, - }, - }, - expected: &types.PipeSourceParameters{ - FilterCriteria: &types.FilterCriteria{ - Filters: []types.Filter{ - { - Pattern: aws.String("1"), - }, - { - Pattern: aws.String("2"), - }, - }, - }, - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := expandSourceParameters([]interface{}{tt.config}) - - assert.Equal(t, tt.expected, got) - }) - } -} - -func Test_flattenSourceParameters(t *testing.T) { - tests := map[string]struct { - config *types.PipeSourceParameters - expected []map[string]interface{} - }{ - "active_mq_broker config": { - expected: []map[string]interface{}{ - { - "active_mq_broker": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "queue": "test", - "credentials": []map[string]interface{}{ - { - "basic_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - ActiveMQBrokerParameters: &types.PipeSourceActiveMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - QueueName: aws.String("test"), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "dynamo_db_stream config": { - expected: []map[string]interface{}{ - { - "dynamo_db_stream": []map[string]interface{}{ - { - "starting_position": types.DynamoDBStreamStartPositionLatest, - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": 
int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - "on_partial_batch_item_failure": types.OnPartialBatchItemFailureStreamsAutomaticBisect, - "dead_letter_config": []map[string]interface{}{ - { - "arn": "arn:queue", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - DynamoDBStreamParameters: &types.PipeSourceDynamoDBStreamParameters{ - StartingPosition: "LATEST", - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "kinesis_stream config": { - expected: []map[string]interface{}{ - { - "kinesis_stream": []map[string]interface{}{ - { - "starting_position": types.KinesisStreamStartPositionAtTimestamp, - "starting_position_timestamp": "2020-01-01T00:00:00Z", - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - "on_partial_batch_item_failure": types.OnPartialBatchItemFailureStreamsAutomaticBisect, - "dead_letter_config": []map[string]interface{}{ - { - "arn": "arn:queue", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - KinesisStreamParameters: &types.PipeSourceKinesisStreamParameters{ - StartingPosition: "AT_TIMESTAMP", - StartingPositionTimestamp: aws.Time(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "managed_streaming_kafka config with client_certificate_tls_auth authentication": { - expected: []map[string]interface{}{ - { - "managed_streaming_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.MSKStartPositionLatest, - "credentials": []map[string]interface{}{ - { - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - Credentials: &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "managed_streaming_kafka config with sasl_scram_512_auth authentication": { - expected: []map[string]interface{}{ - { - "managed_streaming_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.MSKStartPositionLatest, - "credentials": []map[string]interface{}{ - { - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.PipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - 
MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker config": { - expected: []map[string]interface{}{ - { - "rabbit_mq_broker": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "queue": "test", - "virtual_host": "hosting", - "credentials": []map[string]interface{}{ - { - "basic_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - RabbitMQBrokerParameters: &types.PipeSourceRabbitMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - QueueName: aws.String("test"), - VirtualHost: aws.String("hosting"), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with basic_auth authentication": { - expected: []map[string]interface{}{ - { - "self_managed_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.SelfManagedKafkaStartPositionLatest, - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []map[string]interface{}{ - { - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []map[string]interface{}{ - { - "basic_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with client_certificate_tls_auth authentication": { - expected: []map[string]interface{}{ - { - "self_managed_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.SelfManagedKafkaStartPositionLatest, - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []map[string]interface{}{ - { - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []map[string]interface{}{ - { - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: 
aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_512_auth authentication": { - expected: []map[string]interface{}{ - { - "self_managed_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.SelfManagedKafkaStartPositionLatest, - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []map[string]interface{}{ - { - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []map[string]interface{}{ - { - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_256_auth authentication": { - expected: []map[string]interface{}{ - { - "self_managed_kafka": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "topic": "test", - "consumer_group_id": "group", - "starting_position": types.SelfManagedKafkaStartPositionLatest, - "server_root_ca_certificate": "arn:ca:cert", - "servers": schema.NewSet(schema.HashString, []interface{}{ - "server1", - "server2", - }), - "vpc": []map[string]interface{}{ - { - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []map[string]interface{}{ - { - "sasl_scram_256_auth": "arn:secrets", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - SelfManagedKafkaParameters: &types.PipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - TopicName: aws.String("test"), - ConsumerGroupID: aws.String("group"), - StartingPosition: "LATEST", - ServerRootCaCertificate: aws.String("arn:ca:cert"), - AdditionalBootstrapServers: []string{"server2", "server1"}, - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: 
&types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "sqs_queue config": { - expected: []map[string]interface{}{ - { - "sqs_queue": []map[string]interface{}{ - { - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - SqsQueueParameters: &types.PipeSourceSqsQueueParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - }, - }, - }, - "filter_criteria config": { - expected: []map[string]interface{}{ - { - "filter_criteria": []map[string]interface{}{ - { - "filter": []map[string]interface{}{ - { - "pattern": "1", - }, - { - "pattern": "2", - }, - }, - }, - }, - }, - }, - config: &types.PipeSourceParameters{ - FilterCriteria: &types.FilterCriteria{ - Filters: []types.Filter{ - { - Pattern: aws.String("1"), - }, - { - Pattern: aws.String("2"), - }, - }, - }, - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := flattenSourceParameters(tt.config) - - assert.Equal(t, tt.expected, got) - }) - } -} - -func Test_expandSourceUpdateParameters(t *testing.T) { - tests := map[string]struct { - config map[string]interface{} - expected *types.UpdatePipeSourceParameters - }{ - "active_mq_broker config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", - }, - }, - }, - }, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - ActiveMQBrokerParameters: &types.UpdatePipeSourceActiveMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "dynamo_db_stream config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - "on_partial_batch_item_failure": "AUTOMATIC_BISECT", - "dead_letter_config": []interface{}{ - map[string]interface{}{ - "arn": "arn:queue", - }, - }, - }, - }, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - DynamoDBStreamParameters: &types.UpdatePipeSourceDynamoDBStreamParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "kinesis_stream config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - 
"dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "maximum_record_age_in_seconds": int32(120), - "maximum_retry_attempts": int32(3), - "parallelization_factor": int32(1), - "on_partial_batch_item_failure": "AUTOMATIC_BISECT", - "dead_letter_config": []interface{}{ - map[string]interface{}{ - "arn": "arn:queue", - }, - }, - }, - }, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - KinesisStreamParameters: &types.UpdatePipeSourceKinesisStreamParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - MaximumRecordAgeInSeconds: aws.Int32(120), - MaximumRetryAttempts: aws.Int32(3), - ParallelizationFactor: aws.Int32(1), - OnPartialBatchItemFailure: "AUTOMATIC_BISECT", - DeadLetterConfig: &types.DeadLetterConfig{ - Arn: aws.String("arn:queue"), - }, - }, - }, - }, - "managed_streaming_kafka config with client_certificate_tls_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "credentials": []interface{}{ - map[string]interface{}{ - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.UpdatePipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - Credentials: &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "managed_streaming_kafka config with sasl_scram_512_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "credentials": []interface{}{ - map[string]interface{}{ - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - ManagedStreamingKafkaParameters: &types.UpdatePipeSourceManagedStreamingKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - Credentials: &types.MSKAccessCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "rabbit_mq_broker config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", 
- }, - }, - }, - }, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - RabbitMQBrokerParameters: &types.UpdatePipeSourceRabbitMQBrokerParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - Credentials: &types.MQBrokerAccessCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with basic_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "server_root_ca_certificate": "arn:ca:cert", - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "basic_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - ServerRootCaCertificate: aws.String("arn:ca:cert"), - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with client_certificate_tls_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "server_root_ca_certificate": "arn:ca:cert", - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "client_certificate_tls_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - ServerRootCaCertificate: aws.String("arn:ca:cert"), - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_512_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - 
"dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "server_root_ca_certificate": "arn:ca:cert", - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "sasl_scram_512_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - ServerRootCaCertificate: aws.String("arn:ca:cert"), - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "self_managed_kafka config with sasl_scram_256_auth authentication": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - "server_root_ca_certificate": "arn:ca:cert", - "vpc": []interface{}{ - map[string]interface{}{ - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - "credentials": []interface{}{ - map[string]interface{}{ - "sasl_scram_256_auth": "arn:secrets", - }, - }, - }, - }, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - SelfManagedKafkaParameters: &types.UpdatePipeSourceSelfManagedKafkaParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - ServerRootCaCertificate: aws.String("arn:ca:cert"), - Vpc: &types.SelfManagedKafkaAccessConfigurationVpc{ - SecurityGroup: []string{"sg2", "sg1"}, - Subnets: []string{"subnet1", "subnet2"}, - }, - Credentials: &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ - Value: "arn:secrets", - }, - }, - }, - }, - "sqs_queue config": { - config: map[string]interface{}{ - "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{ - map[string]interface{}{ - "batch_size": int32(10), - "maximum_batching_window_in_seconds": int32(60), - }, - }, - "filter_criteria": []interface{}{}, - }, - expected: &types.UpdatePipeSourceParameters{ - SqsQueueParameters: &types.UpdatePipeSourceSqsQueueParameters{ - BatchSize: aws.Int32(10), - MaximumBatchingWindowInSeconds: aws.Int32(60), - }, - }, - }, - "filter_criteria config": { - config: map[string]interface{}{ 
- "active_mq_broker": []interface{}{}, - "dynamo_db_stream": []interface{}{}, - "kinesis_stream": []interface{}{}, - "managed_streaming_kafka": []interface{}{}, - "rabbit_mq_broker": []interface{}{}, - "self_managed_kafka": []interface{}{}, - "sqs_queue": []interface{}{}, - "filter_criteria": []interface{}{ - map[string]interface{}{ - "filter": []interface{}{ - map[string]interface{}{ - "pattern": "1", - }, - map[string]interface{}{ - "pattern": "2", - }, - }, - }, - }, - }, - expected: &types.UpdatePipeSourceParameters{ - FilterCriteria: &types.FilterCriteria{ - Filters: []types.Filter{ - { - Pattern: aws.String("1"), - }, - { - Pattern: aws.String("2"), - }, - }, - }, - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := expandSourceUpdateParameters([]interface{}{tt.config}) - - assert.Equal(t, tt.expected, got) - }) - } -} diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 5177fe8f703..c44a9307e01 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -869,6 +869,30 @@ func targetParametersSchema() *schema.Schema { } } +func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetParameters{} + + // ... nested attribute handling ... + + return apiObject +} + +func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + // ... nested attribute handling ... + + return tfMap +} + func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { if len(config) == 0 { return nil diff --git a/internal/service/pipes/target_parameters_test.go b/internal/service/pipes/target_parameters_test.go deleted file mode 100644 index a525f9fa262..00000000000 --- a/internal/service/pipes/target_parameters_test.go +++ /dev/null @@ -1,1110 +0,0 @@ -package pipes - -import ( - "testing" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_expandTargetParameters(t *testing.T) { - tests := map[string]struct { - config map[string]interface{} - expected *types.PipeTargetParameters - }{ - "batch_target config": { - config: map[string]interface{}{ - "batch_target": []interface{}{ - map[string]interface{}{ - "job_definition": "job:test", - "job_name": "test", - "retry_strategy": []interface{}{ - map[string]interface{}{ - "attempts": int32(2), - }, - }, - "array_properties": []interface{}{ - map[string]interface{}{ - "size": int32(50), - }, - }, - "parameters": []interface{}{ - map[string]interface{}{ - "key": "key1", - "value": "value1", - }, - map[string]interface{}{ - "key": "key2", - "value": "value2", - }, - }, - "depends_on": []interface{}{ - map[string]interface{}{ - "job_id": "jobID1", - "type": "N_TO_N", - }, - map[string]interface{}{ - "job_id": "jobID2", - "type": "SEQUENTIAL", - }, - }, - "container_overrides": []interface{}{ - map[string]interface{}{ - "command": schema.NewSet(schema.HashString, []interface{}{ - "command1", - "command2", - }), - "environment": []interface{}{ - map[string]interface{}{ - "name": "env1", - "value": "valueEnv1", - }, - map[string]interface{}{ - "name": "env2", - "value": "valueEnv2", - }, - }, - "instance_type": "instanceType", - 
"resource_requirements": []interface{}{ - map[string]interface{}{ - "type": "VCPU", - "value": "4", - }, - }, - }, - }, - }, - }, - }, - expected: &types.PipeTargetParameters{ - BatchJobParameters: &types.PipeTargetBatchJobParameters{ - JobDefinition: aws.String("job:test"), - JobName: aws.String("test"), - RetryStrategy: &types.BatchRetryStrategy{ - Attempts: 2, - }, - ArrayProperties: &types.BatchArrayProperties{ - Size: 50, - }, - Parameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - DependsOn: []types.BatchJobDependency{ - { - JobId: aws.String("jobID1"), - Type: types.BatchJobDependencyTypeNToN, - }, - { - JobId: aws.String("jobID2"), - Type: types.BatchJobDependencyTypeSequential, - }, - }, - ContainerOverrides: &types.BatchContainerOverrides{ - Command: []string{"command2", "command1"}, - Environment: []types.BatchEnvironmentVariable{ - { - Name: aws.String("env1"), - Value: aws.String("valueEnv1"), - }, - { - Name: aws.String("env2"), - Value: aws.String("valueEnv2"), - }, - }, - InstanceType: aws.String("instanceType"), - ResourceRequirements: []types.BatchResourceRequirement{ - { - Type: types.BatchResourceRequirementTypeVcpu, - Value: aws.String("4"), - }, - }, - }, - }, - }, - }, - "cloudwatch_logs config": { - config: map[string]interface{}{ - "cloudwatch_logs": []interface{}{ - map[string]interface{}{ - "log_stream_name": "job:test", - "timestamp": "2020-01-01T00:00:00Z", - }, - }, - }, - expected: &types.PipeTargetParameters{ - CloudWatchLogsParameters: &types.PipeTargetCloudWatchLogsParameters{ - LogStreamName: aws.String("job:test"), - Timestamp: aws.String("2020-01-01T00:00:00Z"), - }, - }, - }, - "ecs_task config": { - config: map[string]interface{}{ - "ecs_task": []interface{}{ - map[string]interface{}{ - "task_definition_arn": "arn:test", - "capacity_provider_strategy": []interface{}{ - map[string]interface{}{ - "capacity_provider": "capacityProvider", - "weight": int32(1), - "base": int32(10), - }, - }, - "enable_ecs_managed_tags": true, - "enable_execute_command": true, - "group": "group", - "launch_type": "FARGATE", - "network_configuration": []interface{}{ - map[string]interface{}{ - "aws_vpc_configuration": []interface{}{ - map[string]interface{}{ - "assign_public_ip": "ENABLED", - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - }, - }, - "placement_constraints": []interface{}{ - map[string]interface{}{ - "type": "memberOf", - "expression": "expression", - }, - }, - "placement_strategy": []interface{}{ - map[string]interface{}{ - "type": "binpack", - "field": "field", - }, - }, - "platform_version": "platformVersion", - "propagate_tags": "TASK_DEFINITION", - "reference_id": "referenceID", - "task_count": int32(1), - "tags": []interface{}{ - map[string]interface{}{ - "key": "key1", - "value": "value1", - }, - }, - "overrides": []interface{}{ - map[string]interface{}{ - "cpu": "cpu1", - "memory": "mem2", - "execution_role_arn": "arn:role", - "task_role_arn": "arn:role2", - "inference_accelerator_overrides": []interface{}{ - map[string]interface{}{ - "device_name": "deviceName", - "device_type": "deviceType", - }, - }, - "ecs_ephemeral_storage": []interface{}{ - map[string]interface{}{ - "size_in_gib": int32(30), - }, - }, - "container_overrides": []interface{}{ - map[string]interface{}{ - "cpu": int32(5), - "memory": int32(6), - "memory_reservation": int32(7), - "name": "name", - "command": 
schema.NewSet(schema.HashString, []interface{}{ - "command1", - "command2", - }), - "environment": []interface{}{ - map[string]interface{}{ - "name": "env1", - "value": "valueEnv1", - }, - }, - "environment_files": []interface{}{ - map[string]interface{}{ - "value": "some:arnvalue", - "type": "s3", - }, - }, - "resource_requirements": []interface{}{ - map[string]interface{}{ - "type": "GPU", - "value": "4", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expected: &types.PipeTargetParameters{ - EcsTaskParameters: &types.PipeTargetEcsTaskParameters{ - TaskDefinitionArn: aws.String("arn:test"), - CapacityProviderStrategy: []types.CapacityProviderStrategyItem{ - { - CapacityProvider: aws.String("capacityProvider"), - Weight: 1, - Base: 10, - }, - }, - EnableECSManagedTags: true, - EnableExecuteCommand: true, - Group: aws.String("group"), - LaunchType: types.LaunchTypeFargate, - NetworkConfiguration: &types.NetworkConfiguration{ - AwsvpcConfiguration: &types.AwsVpcConfiguration{ - AssignPublicIp: types.AssignPublicIpEnabled, - SecurityGroups: []string{ - "sg2", - "sg1", - }, - Subnets: []string{ - "subnet1", - "subnet2", - }, - }, - }, - PlacementConstraints: []types.PlacementConstraint{ - { - Type: types.PlacementConstraintTypeMemberOf, - Expression: aws.String("expression"), - }, - }, - PlacementStrategy: []types.PlacementStrategy{ - { - Type: types.PlacementStrategyTypeBinpack, - Field: aws.String("field"), - }, - }, - PlatformVersion: aws.String("platformVersion"), - PropagateTags: types.PropagateTagsTaskDefinition, - ReferenceId: aws.String("referenceID"), - TaskCount: aws.Int32(1), - Tags: []types.Tag{ - { - Key: aws.String("key1"), - Value: aws.String("value1"), - }, - }, - Overrides: &types.EcsTaskOverride{ - Cpu: aws.String("cpu1"), - Memory: aws.String("mem2"), - ExecutionRoleArn: aws.String("arn:role"), - TaskRoleArn: aws.String("arn:role2"), - InferenceAcceleratorOverrides: []types.EcsInferenceAcceleratorOverride{ - { - DeviceName: aws.String("deviceName"), - DeviceType: aws.String("deviceType"), - }, - }, - EphemeralStorage: &types.EcsEphemeralStorage{ - SizeInGiB: 30, - }, - ContainerOverrides: []types.EcsContainerOverride{ - { - Cpu: aws.Int32(5), - Memory: aws.Int32(6), - MemoryReservation: aws.Int32(7), - Name: aws.String("name"), - Command: []string{"command2", "command1"}, - Environment: []types.EcsEnvironmentVariable{ - { - Name: aws.String("env1"), - Value: aws.String("valueEnv1"), - }, - }, - EnvironmentFiles: []types.EcsEnvironmentFile{ - { - Value: aws.String("some:arnvalue"), - Type: types.EcsEnvironmentFileTypeS3, - }, - }, - ResourceRequirements: []types.EcsResourceRequirement{ - { - Type: types.EcsResourceRequirementTypeGpu, - Value: aws.String("4"), - }, - }, - }, - }, - }, - }, - }, - }, - "event_bridge_event_bus config": { - config: map[string]interface{}{ - "event_bridge_event_bus": []interface{}{ - map[string]interface{}{ - "detail_type": "some.event", - "endpoint_id": "endpointID", - "source": "source", - "time": "2020-01-01T00:00:00Z", - "resources": schema.NewSet(schema.HashString, []interface{}{ - "id1", - "id2", - }), - }, - }, - }, - expected: &types.PipeTargetParameters{ - EventBridgeEventBusParameters: &types.PipeTargetEventBridgeEventBusParameters{ - DetailType: aws.String("some.event"), - EndpointId: aws.String("endpointID"), - Source: aws.String("source"), - Time: aws.String("2020-01-01T00:00:00Z"), - Resources: []string{ - "id2", - "id1", - }, - }, - }, - }, - "http_parameters config": { - config: map[string]interface{}{ - "http_parameters": 
[]interface{}{ - map[string]interface{}{ - "path_parameters": []interface{}{"a", "b"}, - "header": []interface{}{ - map[string]interface{}{ - "key": "key1", - "value": "value1", - }, - map[string]interface{}{ - "key": "key2", - "value": "value2", - }, - }, - "query_string": []interface{}{ - map[string]interface{}{ - "key": "key3", - "value": "value3", - }, - map[string]interface{}{ - "key": "key4", - "value": "value4", - }, - }, - }, - }, - }, - expected: &types.PipeTargetParameters{ - HttpParameters: &types.PipeTargetHttpParameters{ - PathParameterValues: []string{"a", "b"}, - HeaderParameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - QueryStringParameters: map[string]string{ - "key3": "value3", - "key4": "value4", - }, - }, - }, - }, - "kinesis_stream config": { - config: map[string]interface{}{ - "kinesis_stream": []interface{}{ - map[string]interface{}{ - "partition_key": "partitionKey", - }, - }, - }, - expected: &types.PipeTargetParameters{ - KinesisStreamParameters: &types.PipeTargetKinesisStreamParameters{ - PartitionKey: aws.String("partitionKey"), - }, - }, - }, - "lambda_function config": { - config: map[string]interface{}{ - "lambda_function": []interface{}{ - map[string]interface{}{ - "invocation_type": "FIRE_AND_FORGET", - }, - }, - }, - expected: &types.PipeTargetParameters{ - LambdaFunctionParameters: &types.PipeTargetLambdaFunctionParameters{ - InvocationType: types.PipeTargetInvocationTypeFireAndForget, - }, - }, - }, - "redshift_data config": { - config: map[string]interface{}{ - "redshift_data": []interface{}{ - map[string]interface{}{ - "database": "database", - "database_user": "database_user", - "secret_manager_arn": "arn:secrets", - "statement_name": "statement_name", - "with_event": true, - "sqls": schema.NewSet(schema.HashString, []interface{}{ - "sql2", - "sql1", - }), - }, - }, - }, - expected: &types.PipeTargetParameters{ - RedshiftDataParameters: &types.PipeTargetRedshiftDataParameters{ - Database: aws.String("database"), - DbUser: aws.String("database_user"), - SecretManagerArn: aws.String("arn:secrets"), - StatementName: aws.String("statement_name"), - WithEvent: true, - Sqls: []string{"sql2", "sql1"}, - }, - }, - }, - "sagemaker_pipeline config": { - config: map[string]interface{}{ - "sagemaker_pipeline": []interface{}{ - map[string]interface{}{ - "parameters": []interface{}{ - map[string]interface{}{ - "name": "name1", - "value": "value1", - }, - map[string]interface{}{ - "name": "name2", - "value": "value2", - }, - }, - }, - }, - }, - expected: &types.PipeTargetParameters{ - SageMakerPipelineParameters: &types.PipeTargetSageMakerPipelineParameters{ - PipelineParameterList: []types.SageMakerPipelineParameter{ - { - Name: aws.String("name1"), - Value: aws.String("value1"), - }, - { - Name: aws.String("name2"), - Value: aws.String("value2"), - }, - }, - }, - }, - }, - "sqs_queue config": { - config: map[string]interface{}{ - "sqs_queue": []interface{}{ - map[string]interface{}{ - "message_deduplication_id": "deduplication-id", - "message_group_id": "group-id", - }, - }, - }, - expected: &types.PipeTargetParameters{ - SqsQueueParameters: &types.PipeTargetSqsQueueParameters{ - MessageDeduplicationId: aws.String("deduplication-id"), - MessageGroupId: aws.String("group-id"), - }, - }, - }, - "step_function config": { - config: map[string]interface{}{ - "step_function": []interface{}{ - map[string]interface{}{ - "invocation_type": "FIRE_AND_FORGET", - }, - }, - }, - expected: &types.PipeTargetParameters{ - 
StepFunctionStateMachineParameters: &types.PipeTargetStateMachineParameters{ - InvocationType: types.PipeTargetInvocationTypeFireAndForget, - }, - }, - }, - "input_template config": { - config: map[string]interface{}{ - "input_template": "some template", - }, - expected: &types.PipeTargetParameters{ - InputTemplate: aws.String("some template"), - }, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - got := expandTargetParameters([]interface{}{tt.config}) - - assert.Equal(t, tt.expected, got) - }) - } -} - -func Test_flattenTargetParameters(t *testing.T) { - tests := map[string]struct { - expected []map[string]interface{} - config *types.PipeTargetParameters - }{ - "batch_target config": { - expected: []map[string]interface{}{ - { - "batch_target": []map[string]interface{}{ - { - "job_definition": "job:test", - "job_name": "test", - "retry_strategy": []map[string]interface{}{ - { - "attempts": int32(2), - }, - }, - "array_properties": []map[string]interface{}{ - { - "size": int32(50), - }, - }, - "parameters": []map[string]interface{}{ - { - "key": "key1", - "value": "value1", - }, - { - "key": "key2", - "value": "value2", - }, - }, - "depends_on": []map[string]interface{}{ - { - "job_id": "jobID1", - "type": types.BatchJobDependencyTypeNToN, - }, - { - "job_id": "jobID2", - "type": types.BatchJobDependencyTypeSequential, - }, - }, - "container_overrides": []map[string]interface{}{ - { - "command": schema.NewSet(schema.HashString, []interface{}{ - "command1", - "command2", - }), - "environment": []map[string]interface{}{ - { - "name": "env1", - "value": "valueEnv1", - }, - { - "name": "env2", - "value": "valueEnv2", - }, - }, - "instance_type": "instanceType", - "resource_requirements": []map[string]interface{}{ - { - "type": types.BatchResourceRequirementTypeVcpu, - "value": "4", - }, - }, - }, - }, - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - BatchJobParameters: &types.PipeTargetBatchJobParameters{ - JobDefinition: aws.String("job:test"), - JobName: aws.String("test"), - RetryStrategy: &types.BatchRetryStrategy{ - Attempts: 2, - }, - ArrayProperties: &types.BatchArrayProperties{ - Size: 50, - }, - Parameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - DependsOn: []types.BatchJobDependency{ - { - JobId: aws.String("jobID1"), - Type: types.BatchJobDependencyTypeNToN, - }, - { - JobId: aws.String("jobID2"), - Type: types.BatchJobDependencyTypeSequential, - }, - }, - ContainerOverrides: &types.BatchContainerOverrides{ - Command: []string{"command2", "command1"}, - Environment: []types.BatchEnvironmentVariable{ - { - Name: aws.String("env1"), - Value: aws.String("valueEnv1"), - }, - { - Name: aws.String("env2"), - Value: aws.String("valueEnv2"), - }, - }, - InstanceType: aws.String("instanceType"), - ResourceRequirements: []types.BatchResourceRequirement{ - { - Type: types.BatchResourceRequirementTypeVcpu, - Value: aws.String("4"), - }, - }, - }, - }, - }, - }, - "cloudwatch_logs config": { - expected: []map[string]interface{}{ - { - "cloudwatch_logs": []map[string]interface{}{ - { - "log_stream_name": "job:test", - "timestamp": "2020-01-01T00:00:00Z", - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - CloudWatchLogsParameters: &types.PipeTargetCloudWatchLogsParameters{ - LogStreamName: aws.String("job:test"), - Timestamp: aws.String("2020-01-01T00:00:00Z"), - }, - }, - }, - "ecs_task config": { - expected: []map[string]interface{}{ - { - "ecs_task": []map[string]interface{}{ - { - "task_definition_arn": "arn:test", - 
"capacity_provider_strategy": []map[string]interface{}{ - { - "capacity_provider": "capacityProvider", - "weight": int32(1), - "base": int32(10), - }, - }, - "enable_ecs_managed_tags": true, - "enable_execute_command": true, - "group": "group", - "launch_type": types.LaunchTypeFargate, - "network_configuration": []map[string]interface{}{ - { - "aws_vpc_configuration": []map[string]interface{}{ - { - "assign_public_ip": types.AssignPublicIpEnabled, - "security_groups": schema.NewSet(schema.HashString, []interface{}{ - "sg1", - "sg2", - }), - "subnets": schema.NewSet(schema.HashString, []interface{}{ - "subnet1", - "subnet2", - }), - }, - }, - }, - }, - "placement_constraints": []map[string]interface{}{ - { - "type": types.PlacementConstraintTypeMemberOf, - "expression": "expression", - }, - }, - "placement_strategy": []map[string]interface{}{ - { - "type": types.PlacementStrategyTypeBinpack, - "field": "field", - }, - }, - "platform_version": "platformVersion", - "propagate_tags": types.PropagateTagsTaskDefinition, - "reference_id": "referenceID", - "task_count": int32(1), - "tags": []map[string]interface{}{ - { - "key": "key1", - "value": "value1", - }, - }, - "overrides": []map[string]interface{}{ - { - "cpu": "cpu1", - "memory": "mem2", - "execution_role_arn": "arn:role", - "task_role_arn": "arn:role2", - "inference_accelerator_overrides": []map[string]interface{}{ - { - "device_name": "deviceName", - "device_type": "deviceType", - }, - }, - "ecs_ephemeral_storage": []map[string]interface{}{ - { - "size_in_gib": int32(30), - }, - }, - "container_overrides": []map[string]interface{}{ - { - "cpu": int32(5), - "memory": int32(6), - "memory_reservation": int32(7), - "name": "name", - "command": schema.NewSet(schema.HashString, []interface{}{ - "command1", - "command2", - }), - "environment": []map[string]interface{}{ - { - "name": "env1", - "value": "valueEnv1", - }, - }, - "environment_files": []map[string]interface{}{ - { - "value": "some:arnvalue", - "type": types.EcsEnvironmentFileTypeS3, - }, - }, - "resource_requirements": []map[string]interface{}{ - { - "type": types.EcsResourceRequirementTypeGpu, - "value": "4", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - EcsTaskParameters: &types.PipeTargetEcsTaskParameters{ - TaskDefinitionArn: aws.String("arn:test"), - CapacityProviderStrategy: []types.CapacityProviderStrategyItem{ - { - CapacityProvider: aws.String("capacityProvider"), - Weight: 1, - Base: 10, - }, - }, - EnableECSManagedTags: true, - EnableExecuteCommand: true, - Group: aws.String("group"), - LaunchType: types.LaunchTypeFargate, - NetworkConfiguration: &types.NetworkConfiguration{ - AwsvpcConfiguration: &types.AwsVpcConfiguration{ - AssignPublicIp: types.AssignPublicIpEnabled, - SecurityGroups: []string{ - "sg2", - "sg1", - }, - Subnets: []string{ - "subnet1", - "subnet2", - }, - }, - }, - PlacementConstraints: []types.PlacementConstraint{ - { - Type: types.PlacementConstraintTypeMemberOf, - Expression: aws.String("expression"), - }, - }, - PlacementStrategy: []types.PlacementStrategy{ - { - Type: types.PlacementStrategyTypeBinpack, - Field: aws.String("field"), - }, - }, - PlatformVersion: aws.String("platformVersion"), - PropagateTags: types.PropagateTagsTaskDefinition, - ReferenceId: aws.String("referenceID"), - TaskCount: aws.Int32(1), - Tags: []types.Tag{ - { - Key: aws.String("key1"), - Value: aws.String("value1"), - }, - }, - Overrides: &types.EcsTaskOverride{ - Cpu: aws.String("cpu1"), - Memory: aws.String("mem2"), - 
ExecutionRoleArn: aws.String("arn:role"), - TaskRoleArn: aws.String("arn:role2"), - InferenceAcceleratorOverrides: []types.EcsInferenceAcceleratorOverride{ - { - DeviceName: aws.String("deviceName"), - DeviceType: aws.String("deviceType"), - }, - }, - EphemeralStorage: &types.EcsEphemeralStorage{ - SizeInGiB: 30, - }, - ContainerOverrides: []types.EcsContainerOverride{ - { - Cpu: aws.Int32(5), - Memory: aws.Int32(6), - MemoryReservation: aws.Int32(7), - Name: aws.String("name"), - Command: []string{"command2", "command1"}, - Environment: []types.EcsEnvironmentVariable{ - { - Name: aws.String("env1"), - Value: aws.String("valueEnv1"), - }, - }, - EnvironmentFiles: []types.EcsEnvironmentFile{ - { - Value: aws.String("some:arnvalue"), - Type: types.EcsEnvironmentFileTypeS3, - }, - }, - ResourceRequirements: []types.EcsResourceRequirement{ - { - Type: types.EcsResourceRequirementTypeGpu, - Value: aws.String("4"), - }, - }, - }, - }, - }, - }, - }, - }, - "event_bridge_event_bus config": { - expected: []map[string]interface{}{ - { - "event_bridge_event_bus": []map[string]interface{}{ - { - "detail_type": "some.event", - "endpoint_id": "endpointID", - "source": "source", - "time": "2020-01-01T00:00:00Z", - "resources": schema.NewSet(schema.HashString, []interface{}{ - "id1", - "id2", - }), - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - EventBridgeEventBusParameters: &types.PipeTargetEventBridgeEventBusParameters{ - DetailType: aws.String("some.event"), - EndpointId: aws.String("endpointID"), - Source: aws.String("source"), - Time: aws.String("2020-01-01T00:00:00Z"), - Resources: []string{ - "id2", - "id1", - }, - }, - }, - }, - "http_parameters config": { - expected: []map[string]interface{}{ - { - "http_parameters": []map[string]interface{}{ - { - "path_parameters": []interface{}{"a", "b"}, - "header": []map[string]interface{}{ - { - "key": "key1", - "value": "value1", - }, - { - "key": "key2", - "value": "value2", - }, - }, - "query_string": []map[string]interface{}{ - { - "key": "key3", - "value": "value3", - }, - { - "key": "key4", - "value": "value4", - }, - }, - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - HttpParameters: &types.PipeTargetHttpParameters{ - PathParameterValues: []string{"a", "b"}, - HeaderParameters: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - QueryStringParameters: map[string]string{ - "key3": "value3", - "key4": "value4", - }, - }, - }, - }, - "kinesis_stream config": { - expected: []map[string]interface{}{ - { - "kinesis_stream": []map[string]interface{}{ - { - "partition_key": "partitionKey", - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - KinesisStreamParameters: &types.PipeTargetKinesisStreamParameters{ - PartitionKey: aws.String("partitionKey"), - }, - }, - }, - "lambda_function config": { - expected: []map[string]interface{}{ - { - "lambda_function": []map[string]interface{}{ - { - "invocation_type": types.PipeTargetInvocationTypeFireAndForget, - }, - }, - }, - }, - config: &types.PipeTargetParameters{ - LambdaFunctionParameters: &types.PipeTargetLambdaFunctionParameters{ - InvocationType: types.PipeTargetInvocationTypeFireAndForget, - }, - }, - }, - "redshift_data config": { - expected: []map[string]interface{}{ - { - "redshift_data": []map[string]interface{}{ - { - "database": "database", - "database_user": "database_user", - "secret_manager_arn": "arn:secrets", - "statement_name": "statement_name", - "with_event": true, - "sqls": schema.NewSet(schema.HashString, []interface{}{ - "sql2", - "sql1", - 
}),
-                },
-            },
-        },
-    },
-    config: &types.PipeTargetParameters{
-        RedshiftDataParameters: &types.PipeTargetRedshiftDataParameters{
-            Database:         aws.String("database"),
-            DbUser:           aws.String("database_user"),
-            SecretManagerArn: aws.String("arn:secrets"),
-            StatementName:    aws.String("statement_name"),
-            WithEvent:        true,
-            Sqls:             []string{"sql2", "sql1"},
-        },
-    },
-},
-"sagemaker_pipeline config": {
-    expected: []map[string]interface{}{
-        {
-            "sagemaker_pipeline": []map[string]interface{}{
-                {
-                    "parameters": []map[string]interface{}{
-                        {
-                            "name":  "name1",
-                            "value": "value1",
-                        },
-                        {
-                            "name":  "name2",
-                            "value": "value2",
-                        },
-                    },
-                },
-            },
-        },
-    },
-    config: &types.PipeTargetParameters{
-        SageMakerPipelineParameters: &types.PipeTargetSageMakerPipelineParameters{
-            PipelineParameterList: []types.SageMakerPipelineParameter{
-                {
-                    Name:  aws.String("name1"),
-                    Value: aws.String("value1"),
-                },
-                {
-                    Name:  aws.String("name2"),
-                    Value: aws.String("value2"),
-                },
-            },
-        },
-    },
-},
-"sqs_queue config": {
-    expected: []map[string]interface{}{
-        {
-            "sqs_queue": []map[string]interface{}{
-                {
-                    "message_deduplication_id": "deduplication-id",
-                    "message_group_id":         "group-id",
-                },
-            },
-        },
-    },
-    config: &types.PipeTargetParameters{
-        SqsQueueParameters: &types.PipeTargetSqsQueueParameters{
-            MessageDeduplicationId: aws.String("deduplication-id"),
-            MessageGroupId:         aws.String("group-id"),
-        },
-    },
-},
-"step_function config": {
-    expected: []map[string]interface{}{
-        {
-            "step_function": []map[string]interface{}{
-                {
-                    "invocation_type": types.PipeTargetInvocationTypeFireAndForget,
-                },
-            },
-        },
-    },
-    config: &types.PipeTargetParameters{
-        StepFunctionStateMachineParameters: &types.PipeTargetStateMachineParameters{
-            InvocationType: types.PipeTargetInvocationTypeFireAndForget,
-        },
-    },
-},
-"input_template config": {
-    expected: []map[string]interface{}{
-        {
-            "input_template": "some template",
-        },
-    },
-    config: &types.PipeTargetParameters{
-        InputTemplate: aws.String("some template"),
-    },
-},
-}
-for name, tt := range tests {
-    t.Run(name, func(t *testing.T) {
-        got := flattenTargetParameters(tt.config)
-
-        assert.Equal(t, tt.expected, got)
-    })
-}
-}
diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown
index 4436a0dbb87..cefeb6d7105 100644
--- a/website/docs/r/pipes_pipe.html.markdown
+++ b/website/docs/r/pipes_pipe.html.markdown
@@ -104,28 +104,16 @@ resource "aws_pipes_pipe" "example" {
   enrichment = aws_cloudwatch_event_api_destination.example.arn

   enrichment_parameters {
     http_parameters {
-      header {
-        key   = "example-header"
-        value = "example-value"
-      }
-
-      header {
-        key   = "second-example-header"
-        value = "second-example-value"
-      }
+      header_parameters = {
+        "example-header"        = "example-value"
+        "second-example-header" = "second-example-value"
+      }

-      path_parameters = ["example-path-param"]
+      path_parameter_values = ["example-path-param"]

-      query_string {
-        key   = "example-query-string"
-        value = "example-value"
-      }
-
-      query_string {
-        key   = "second-example-query-string"
-        value = "second-example-value"
-      }
+      query_string_parameters = {
+        "example-query-string"        = "example-value"
+        "second-example-query-string" = "second-example-value"
+      }
     }
   }
 }
@@ -206,19 +194,9 @@ You can find out more about EventBridge Pipes Enrichment in the [User Guide](htt

 #### enrichment_parameters.http_parameters Configuration Block

-* `header` - (Optional) The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
-* `path_parameters` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
-* `query_string` - (Optional) The query strings that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below.
-
-##### enrichment_parameters.http_parameters.header Configuration Block
-
-* `key` - (Optional) The name of the header. Maximum length of 512 characters.
-* `value` - (Optional) The header value. Maximum length of 512 characters.
-
-##### enrichment_parameters.http_parameters.query_string Configuration Block
-
-* `key` - (Optional) The name of the query string. Maximum length of 512 characters.
-* `value` - (Optional) The value of the query string. Maximum length of 512 characters.
+* `header_parameters` - (Optional) Key-value mapping of the headers that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.
+* `path_parameter_values` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").
+* `query_string_parameters` - (Optional) Key-value mapping of the query strings that need to be sent as part of the request invoking the API Gateway REST API or EventBridge ApiDestination.

 ### source_parameters Configuration Block

From 1299833b878ab9cff9bded9802898dc7a7604fd9 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Sun, 11 Jun 2023 16:54:38 -0400
Subject: [PATCH 13/65] r/aws_pipes_pipe: 'source_parameters.filter_criteria' changes.

---
 internal/service/pipes/source_parameters.go | 104 +++++++++++++++++++-
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go
index ac446ca6ab2..c3aa9bce77b 100644
--- a/internal/service/pipes/source_parameters.go
+++ b/internal/service/pipes/source_parameters.go
@@ -2,14 +2,12 @@ package pipes

 import (
     "regexp"
-    "time"

     "github.com/aws/aws-sdk-go-v2/aws"
     "github.com/aws/aws-sdk-go-v2/service/pipes/types"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
     "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
     "github.com/hashicorp/terraform-provider-aws/internal/enum"
-    "github.com/hashicorp/terraform-provider-aws/internal/flex"
     "github.com/hashicorp/terraform-provider-aws/internal/verify"
 )

@@ -630,7 +628,9 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP

     apiObject := &types.PipeSourceParameters{}

-    // ... nested attribute handling ...
+ if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + } return apiObject } @@ -647,6 +647,60 @@ func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.Updat return apiObject } +func expandFilterCriteria(tfMap map[string]interface{}) *types.FilterCriteria { + if tfMap == nil { + return nil + } + + apiObject := &types.FilterCriteria{} + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { + apiObject.Filters = expandFilters(v) + } + + return apiObject +} + +func expandFilter(tfMap map[string]interface{}) *types.Filter { + if tfMap == nil { + return nil + } + + apiObject := &types.Filter{} + + if v, ok := tfMap["pattern"].(string); ok && v != "" { + apiObject.Pattern = aws.String(v) + } + + return apiObject +} + +func expandFilters(tfList []interface{}) []types.Filter { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.Filter + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandFilter(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { if apiObject == nil { return nil @@ -654,11 +708,52 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap := map[string]interface{}{} - // ... nested attribute handling ... + if v := apiObject.FilterCriteria; v != nil { + tfMap["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} + } + + return tfMap +} + +func flattenFilterCriteria(apiObject *types.FilterCriteria) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Filters; v != nil { + tfMap["filter"] = flattenFilters(v) + } + + return tfMap +} + +func flattenFilter(apiObject types.Filter) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Pattern; v != nil { + tfMap["pattern"] = aws.ToString(v) + } return tfMap } +func flattenFilters(apiObjects []types.Filter) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenFilter(apiObject)) + } + + return tfList +} + +/* func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { if len(config) == 0 { return nil @@ -1541,3 +1636,4 @@ func expandSourceUpdateSqsQueueParameters(config []interface{}) *types.UpdatePip return ¶meters } +*/ From f67104cc99bb2b9e573e452beb74aeb6b420918e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 12 Jun 2023 16:46:27 -0400 Subject: [PATCH 14/65] r/aws_pipes_pipe: 'source_parameters.sqs_queue' -> 'source_parameters.sqs_queue_parameters'. 
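For reference, a minimal pipe configuration written against the renamed block might look like the sketch below; the queue and role resources are illustrative assumptions, not part of this change:

```terraform
resource "aws_pipes_pipe" "example" {
  name     = "example-pipe"
  role_arn = aws_iam_role.example.arn # assumed IAM role
  source   = aws_sqs_queue.source.arn # assumed source queue
  target   = aws_sqs_queue.target.arn # assumed target queue

  source_parameters {
    # Renamed from "sqs_queue" in this change.
    sqs_queue_parameters {
      batch_size                         = 1
      maximum_batching_window_in_seconds = 2
    }
  }
}
```

The new name matches the `SqsQueueParameters` field of the SDK's `PipeSourceParameters` type, keeping the Terraform attribute and the API shape aligned.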
--- internal/service/pipes/source_parameters.go | 58 ++++++++++++++++++--- website/docs/r/pipes_pipe.html.markdown | 6 +-- 2 files changed, 54 insertions(+), 10 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index c3aa9bce77b..98cfd08b1f4 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -31,7 +31,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -93,7 +93,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -201,7 +201,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -291,7 +291,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -372,7 +372,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.self_managed_kafka", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -443,7 +443,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", - "source_parameters.0.sqs_queue", + "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -576,7 +576,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "sqs_queue": { + "sqs_queue_parameters": { Type: schema.TypeList, Optional: true, Computed: true, @@ -632,6 +632,10 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) } + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SqsQueueParameters = expandPipeSourceSqsQueueParameters(v[0].(map[string]interface{})) + } + return apiObject } @@ -701,6 +705,24 @@ func expandFilters(tfList []interface{}) []types.Filter { return apiObjects } +func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceSqsQueueParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = 
aws.Int32(int32(v))
+    }
+
+    return apiObject
+}
+
 func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} {
     if apiObject == nil {
         return nil
@@ -712,6 +734,10 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri
         tfMap["filter_criteria"] = []interface{}{flattenFilterCriteria(v)}
     }

+    if v := apiObject.SqsQueueParameters; v != nil {
+        tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSqsQueueParameters(v)}
+    }
+
     return tfMap
 }

@@ -753,6 +779,24 @@ func flattenFilters(apiObjects []types.Filter) []interface{} {
     return tfList
 }

+func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} {
+    if apiObject == nil {
+        return nil
+    }
+
+    tfMap := map[string]interface{}{}
+
+    if v := apiObject.BatchSize; v != nil {
+        tfMap["batch_size"] = aws.ToInt32(v)
+    }
+
+    if v := apiObject.MaximumBatchingWindowInSeconds; v != nil {
+        tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v)
+    }
+
+    return tfMap
+}
+
 /*
 func expandSourceParameters(config []interface{}) *types.PipeSourceParameters {
     if len(config) == 0 {
diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown
index cefeb6d7105..3f75b83fe10 100644
--- a/website/docs/r/pipes_pipe.html.markdown
+++ b/website/docs/r/pipes_pipe.html.markdown
@@ -150,7 +150,7 @@ resource "aws_pipes_pipe" "example" {
   target = aws_sqs_queue.target.arn

   source_parameters {
-    sqs_queue {
+    sqs_queue_parameters {
       batch_size                         = 1
       maximum_batching_window_in_seconds = 2
     }
@@ -209,7 +209,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https:
 * `managed_streaming_kafka` - (Optional) The parameters for using an MSK stream as a source. Detailed below.
 * `rabbit_mq_broker` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below.
 * `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below.
-* `sqs_queue` - (Optional) The parameters for using a Amazon SQS stream as a source. Detailed below.
+* `sqs_queue_parameters` - (Optional) The parameters for using an Amazon SQS queue as a source. Detailed below.

 #### source_parameters.filter_criteria Configuration Block

@@ -311,7 +311,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https:
 * `security_groups` - (Optional) List of security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.
 * `subnets` - (Optional) List of the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.

-#### source_parameters.sqs_queue Configuration Block
+#### source_parameters.sqs_queue_parameters Configuration Block

 * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
 * `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.

From fc4286f7f139c0334d895b5c8de4bdbb49c5e28f Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 12 Jun 2023 17:04:51 -0400
Subject: [PATCH 15/65] r/aws_pipes_pipe: 'source_parameters.dynamo_db_stream' -> 'source_parameters.dynamodb_stream_parameters'.
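For reference, a configuration using the renamed block might look like this sketch; the table, queues, and role are illustrative assumptions, not part of this change:

```terraform
resource "aws_pipes_pipe" "example" {
  name     = "example-pipe"
  role_arn = aws_iam_role.example.arn              # assumed IAM role
  source   = aws_dynamodb_table.example.stream_arn # assumed stream-enabled table
  target   = aws_sqs_queue.target.arn              # assumed target queue

  source_parameters {
    # Renamed from "dynamo_db_stream" in this change.
    dynamodb_stream_parameters {
      batch_size        = 100
      starting_position = "TRIM_HORIZON"

      dead_letter_config {
        arn = aws_sqs_queue.dlq.arn # assumed dead-letter queue
      }
    }
  }
}
```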
--- internal/service/pipes/source_parameters.go | 148 ++++++++++++++++++-- website/docs/r/pipes_pipe.html.markdown | 6 +- 2 files changed, 143 insertions(+), 11 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 98cfd08b1f4..78461fe38b5 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -26,7 +26,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", @@ -83,7 +83,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "dynamo_db_stream": { + "dynamodb_stream_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -197,7 +197,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", @@ -287,7 +287,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", @@ -368,7 +368,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.self_managed_kafka", @@ -439,7 +439,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", @@ -583,7 +583,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.active_mq_broker", - "source_parameters.0.dynamo_db_stream", + "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", @@ -628,10 +628,20 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject := &types.PipeSourceParameters{} + // TODO + + if v, ok := tfMap["dynamodb_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DynamoDBStreamParameters = expandPipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) + } + + // TODO + if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) } + // TODO + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.SqsQueueParameters = expandPipeSourceSqsQueueParameters(v[0].(map[string]interface{})) } @@ -646,7 +656,7 @@ func 
expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.Updat apiObject := &types.UpdatePipeSourceParameters{} - // ... nested attribute handling ... + // TODO return apiObject } @@ -705,6 +715,62 @@ func expandFilters(tfList []interface{}) []types.Filter { return apiObjects } +func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.PipeSourceDynamoDBStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceDynamoDBStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok && v != 0 { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok && v != "" { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok && v != 0 { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.DynamoDBStreamStartPosition(v) + } + + return apiObject +} + +func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { + if tfMap == nil { + return nil + } + + apiObject := &types.DeadLetterConfig{} + + if v, ok := tfMap["arn"].(string); ok && v != "" { + apiObject.Arn = aws.String(v) + } + + return apiObject +} + func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { if tfMap == nil { return nil @@ -730,10 +796,20 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap := map[string]interface{}{} + // TODO + + if v := apiObject.DynamoDBStreamParameters; v != nil { + tfMap["dynamodb_stream_parameters"] = []interface{}{flattenPipeSourceDynamoDBStreamParameters(v)} + } + + // TODO + if v := apiObject.FilterCriteria; v != nil { tfMap["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} } + // TODO + if v := apiObject.SqsQueueParameters; v != nil { tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSqsQueueParameters(v)} } @@ -797,6 +873,62 @@ func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueuePara return tfMap } +func flattenPipeSourceDynamoDBStreamParameters(apiObject *types.PipeSourceDynamoDBStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.DeadLetterConfig; v != nil { + tfMap["dead_letter_config"] = []interface{}{flattenDeadLetterConfig(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRecordAgeInSeconds; v != nil { + tfMap["maximum_record_age_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRetryAttempts; v != nil { + 
tfMap["maximum_retry_attempts"] = aws.ToInt32(v) + } + + if v := apiObject.OnPartialBatchItemFailure; v != "" { + tfMap["on_partial_batch_item_failure"] = v + } + + if v := apiObject.ParallelizationFactor; v != nil { + tfMap["parallelization_factor"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + return tfMap +} + +func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Arn; v != nil { + tfMap["arn"] = aws.ToString(v) + } + + return tfMap +} + /* func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { if len(config) == 0 { diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 3f75b83fe10..9a719126cb8 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -203,7 +203,7 @@ You can find out more about EventBridge Pipes Enrichment in the [User Guide](htt You can find out more about EventBridge Pipes Sources in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-source.html). * `active_mq_broker` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below. -* `dynamo_db_stream` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below. +* `dynamodb_stream_parameters` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below. * `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below. * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `managed_streaming_kafka` - (Optional) The parameters for using an MSK stream as a source. Detailed below. @@ -230,7 +230,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the basic auth credentials. -#### source_parameters.dynamo_db_stream Configuration Block +#### source_parameters.dynamodb_stream_parameters Configuration Block * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000. * `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below. @@ -241,7 +241,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `parallelization_factor` - (Optional)The number of batches to process concurrently from each shard. The default value is 1. Maximum value of 10. * `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST. -##### source_parameters.dynamo_db_stream.dead_letter_config Configuration Block +##### source_parameters.dynamodb_stream_parameters.dead_letter_config Configuration Block * `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue. From 3e6bfecc5f7ef91a82fa55af487cc7f1f0cee161 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 12 Jun 2023 17:19:34 -0400 Subject: [PATCH 16/65] r/aws_pipes_pipe: 'source_parameters.active_mq_broker' -> 'source_parameters.activemq_broker_parameters'. 
--- internal/service/pipes/source_parameters.go | 106 ++++++++++++++++++-- website/docs/r/pipes_pipe.html.markdown | 8 +- 2 files changed, 99 insertions(+), 15 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 78461fe38b5..f7b03bd714b 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -21,7 +21,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "active_mq_broker": { + "activemq_broker_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -71,7 +71,7 @@ func sourceParametersSchema() *schema.Schema { return old == "0" }, }, - "queue": { + "queue_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -88,7 +88,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", @@ -196,7 +196,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", @@ -286,7 +286,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.rabbit_mq_broker", @@ -367,7 +367,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", @@ -438,7 +438,7 @@ func sourceParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", @@ -582,7 +582,7 @@ func sourceParametersSchema() *schema.Schema { Computed: true, MaxItems: 1, ConflictsWith: []string{ - "source_parameters.0.active_mq_broker", + "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream", "source_parameters.0.managed_streaming_kafka", @@ -628,7 +628,9 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject := &types.PipeSourceParameters{} - // TODO + if v, ok := tfMap["activemq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ActiveMQBrokerParameters = expandPipeSourceActiveMQBrokerParameters(v[0].(map[string]interface{})) + } if v, ok := tfMap["dynamodb_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.DynamoDBStreamParameters = expandPipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) @@ -715,6 +717,46 @@ func expandFilters(tfList 
[]interface{}) []types.Filter { return apiObjects } +func expandPipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *types.PipeSourceActiveMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceActiveMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentialsMemberBasicAuth(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["queue_name"].(string); ok && v != "" { + apiObject.QueueName = aws.String(v) + } + + return apiObject +} + +func expandMQBrokerAccessCredentialsMemberBasicAuth(tfMap map[string]interface{}) types.MQBrokerAccessCredentials { + if tfMap == nil { + return nil + } + + apiObject := &types.MQBrokerAccessCredentialsMemberBasicAuth{} + + if v, ok := tfMap["basic_auth"].(string); ok && v != "" { + apiObject.Value = v + } + + return apiObject +} + func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.PipeSourceDynamoDBStreamParameters { if tfMap == nil { return nil @@ -796,7 +838,9 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap := map[string]interface{}{} - // TODO + if v := apiObject.ActiveMQBrokerParameters; v != nil { + tfMap["activemq_broker_parameters"] = []interface{}{flattenPipeSourceActiveMQBrokerParameters(v)} + } if v := apiObject.DynamoDBStreamParameters; v != nil { tfMap["dynamodb_stream_parameters"] = []interface{}{flattenPipeSourceDynamoDBStreamParameters(v)} @@ -855,7 +899,7 @@ func flattenFilters(apiObjects []types.Filter) []interface{} { return tfList } -func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { +func flattenPipeSourceActiveMQBrokerParameters(apiObject *types.PipeSourceActiveMQBrokerParameters) map[string]interface{} { if apiObject == nil { return nil } @@ -866,10 +910,32 @@ func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueuePara tfMap["batch_size"] = aws.ToInt32(v) } + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentialsMemberBasicAuth(v.(*types.MQBrokerAccessCredentialsMemberBasicAuth))} + } + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) } + if v := apiObject.QueueName; v != nil { + tfMap["queue_name"] = aws.ToString(v) + } + + return tfMap +} + +func flattenMQBrokerAccessCredentialsMemberBasicAuth(apiObject *types.MQBrokerAccessCredentialsMemberBasicAuth) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Value; v != "" { + tfMap["basic_auth"] = v + } + return tfMap } @@ -929,6 +995,24 @@ func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]inter return tfMap } +func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = 
aws.ToInt32(v)
+    }
+
+    return tfMap
+}
+
 /*
 func expandSourceParameters(config []interface{}) *types.PipeSourceParameters {
     if len(config) == 0 {
diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown
index 9a719126cb8..552c01336b6 100644
--- a/website/docs/r/pipes_pipe.html.markdown
+++ b/website/docs/r/pipes_pipe.html.markdown
@@ -202,7 +202,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https:

-* `active_mq_broker` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below.
+* `activemq_broker_parameters` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below.
 * `dynamodb_stream_parameters` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below.
 * `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below.
 * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below.
@@ -219,14 +219,14 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https:

 * `pattern` - (Required) The event pattern. At most 4096 characters.

-#### source_parameters.active_mq_broker Configuration Block
+#### source_parameters.activemq_broker_parameters Configuration Block

 * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
 * `credentials` - (Required) The credentials needed to access the resource. Detailed below.
 * `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
-* `queue` - (Required) The name of the destination queue to consume. Maximum length of 1000.
+* `queue_name` - (Required) The name of the destination queue to consume. Maximum length of 1000.

-##### source_parameters.active_mq_broker.credentials Configuration Block
+##### source_parameters.activemq_broker_parameters.credentials Configuration Block

 * `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the basic auth credentials.

From 526fce8bac7d3103fe79790f213e1a8e12d8c25e Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 13 Jun 2023 09:41:23 -0400
Subject: [PATCH 17/65] r/aws_pipes_pipe: 'source_parameters.kinesis_stream' -> 'source_parameters.kinesis_stream_parameters'.
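For reference, a configuration using the renamed block might look like this sketch; the stream, queues, and role are illustrative assumptions, not part of this change:

```terraform
resource "aws_pipes_pipe" "example" {
  name     = "example-pipe"
  role_arn = aws_iam_role.example.arn       # assumed IAM role
  source   = aws_kinesis_stream.example.arn # assumed source stream
  target   = aws_sqs_queue.target.arn       # assumed target queue

  source_parameters {
    # Renamed from "kinesis_stream" in this change.
    kinesis_stream_parameters {
      starting_position = "LATEST" # required for Kinesis sources
      batch_size        = 100

      dead_letter_config {
        arn = aws_sqs_queue.dlq.arn # assumed dead-letter queue
      }
    }
  }
}
```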
--- internal/service/pipes/source_parameters.go | 135 +++++++++++++++++--- website/docs/r/pipes_pipe.html.markdown | 6 +- 2 files changed, 120 insertions(+), 21 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index f7b03bd714b..cd81e35b888 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -2,6 +2,7 @@ package pipes import ( "regexp" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes/types" @@ -27,7 +28,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", @@ -89,7 +90,7 @@ func sourceParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", @@ -191,7 +192,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "kinesis_stream": { + "kinesis_stream_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -288,7 +289,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", "source_parameters.0.sqs_queue_parameters", @@ -369,7 +370,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.self_managed_kafka", "source_parameters.0.sqs_queue_parameters", @@ -440,7 +441,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.sqs_queue_parameters", @@ -584,7 +585,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.kinesis_stream", + "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", @@ -636,12 +637,14 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject.DynamoDBStreamParameters = expandPipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) } - // TODO - if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) } + if v, ok := 
tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandPipeSourceKinesisStreamParameters(v[0].(map[string]interface{})) + } + // TODO if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -799,15 +802,49 @@ func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *typ return apiObject } -func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { +func expandPipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeSourceKinesisStreamParameters { if tfMap == nil { return nil } - apiObject := &types.DeadLetterConfig{} + apiObject := &types.PipeSourceKinesisStreamParameters{} - if v, ok := tfMap["arn"].(string); ok && v != "" { - apiObject.Arn = aws.String(v) + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok && v != 0 { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok && v != "" { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok && v != 0 { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.KinesisStreamStartPosition(v) + } + + if v, ok := tfMap["starting_position_timestamp"].(string); ok && v != "" { + v, _ := time.Parse(time.RFC3339, v) + + apiObject.StartingPositionTimestamp = aws.Time(v) } return apiObject @@ -831,6 +868,20 @@ func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.Pip return apiObject } +func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { + if tfMap == nil { + return nil + } + + apiObject := &types.DeadLetterConfig{} + + if v, ok := tfMap["arn"].(string); ok && v != "" { + apiObject.Arn = aws.String(v) + } + + return apiObject +} + func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[string]interface{} { if apiObject == nil { return nil @@ -846,12 +897,14 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap["dynamodb_stream_parameters"] = []interface{}{flattenPipeSourceDynamoDBStreamParameters(v)} } - // TODO - if v := apiObject.FilterCriteria; v != nil { tfMap["filter_criteria"] = []interface{}{flattenFilterCriteria(v)} } + if v := apiObject.KinesisStreamParameters; v != nil { + tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeSourceKinesisStreamParameters(v)} + } + // TODO if v := apiObject.SqsQueueParameters; v != nil { @@ -981,15 +1034,47 @@ func flattenPipeSourceDynamoDBStreamParameters(apiObject *types.PipeSourceDynamo return tfMap } -func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { +func flattenPipeSourceKinesisStreamParameters(apiObject 
*types.PipeSourceKinesisStreamParameters) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.Arn; v != nil { - tfMap["arn"] = aws.ToString(v) + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.DeadLetterConfig; v != nil { + tfMap["dead_letter_config"] = []interface{}{flattenDeadLetterConfig(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRecordAgeInSeconds; v != nil { + tfMap["maximum_record_age_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.MaximumRetryAttempts; v != nil { + tfMap["maximum_retry_attempts"] = aws.ToInt32(v) + } + + if v := apiObject.OnPartialBatchItemFailure; v != "" { + tfMap["on_partial_batch_item_failure"] = v + } + + if v := apiObject.ParallelizationFactor; v != nil { + tfMap["parallelization_factor"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + if v := apiObject.StartingPositionTimestamp; v != nil { + tfMap["starting_position_timestamp"] = aws.ToTime(v).Format(time.RFC3339) } return tfMap @@ -1013,6 +1098,20 @@ func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueuePara return tfMap } +func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Arn; v != nil { + tfMap["arn"] = aws.ToString(v) + } + + return tfMap +} + /* func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { if len(config) == 0 { diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 552c01336b6..c8be3852a97 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -205,7 +205,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `activemq_broker_parameters` - (Optional) The parameters for using an Active MQ broker as a source. Detailed below. * `dynamodb_stream_parameters` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below. * `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below. -* `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. +* `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `managed_streaming_kafka` - (Optional) The parameters for using an MSK stream as a source. Detailed below. * `rabbit_mq_broker` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below. * `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below. @@ -245,7 +245,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue. -#### source_parameters.kinesis_stream Configuration Block +#### source_parameters.kinesis_stream_parameters Configuration Block * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000. 
* `dead_letter_config` - (Optional) Define the target queue to send dead-letter queue events to. Detailed below. @@ -257,7 +257,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `starting_position` - (Required) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST, AT_TIMESTAMP. * `starting_position_timestamp` - (Optional) With StartingPosition set to AT_TIMESTAMP, the time from which to start reading, in Unix time seconds. -##### source_parameters.kinesis_stream.dead_letter_config Configuration Block +##### source_parameters.kinesis_stream_parameters.dead_letter_config Configuration Block * `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue. From e0b8793cb47df5333167f59e31074e399a1dd7e3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 10:00:16 -0400 Subject: [PATCH 18/65] r/aws_pipes_pipe: 'source_parameters.managed_streaming_kafka' -> 'source_parameters.managed_streaming_kafka_parameters'. --- internal/service/pipes/source_parameters.go | 138 ++++++++++++++++++-- website/docs/r/pipes_pipe.html.markdown | 8 +- 2 files changed, 134 insertions(+), 12 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index cd81e35b888..ea31b3b123b 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -29,7 +29,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", "source_parameters.0.sqs_queue_parameters", @@ -91,7 +91,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.kinesis_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", "source_parameters.0.sqs_queue_parameters", @@ -199,7 +199,7 @@ func sourceParametersSchema() *schema.Schema { ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", "source_parameters.0.sqs_queue_parameters", @@ -282,7 +282,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "managed_streaming_kafka": { + "managed_streaming_kafka_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -351,7 +351,7 @@ func sourceParametersSchema() *schema.Schema { ForceNew: true, ValidateDiagFunc: enum.Validate[types.MSKStartPosition](), }, - "topic": { + "topic_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -371,7 +371,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.self_managed_kafka", 
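For reference, a configuration using the renamed block might look like this sketch; the cluster, topic, and role values are illustrative assumptions, not part of this change:

```terraform
resource "aws_pipes_pipe" "example" {
  name     = "example-pipe"
  role_arn = aws_iam_role.example.arn    # assumed IAM role
  source   = aws_msk_cluster.example.arn # assumed MSK cluster
  target   = aws_sqs_queue.target.arn    # assumed target queue

  source_parameters {
    # Renamed from "managed_streaming_kafka"; "topic" is now "topic_name".
    managed_streaming_kafka_parameters {
      topic_name        = "example-topic"
      consumer_group_id = "example-group"
      starting_position = "TRIM_HORIZON"
    }
  }
}
```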
"source_parameters.0.sqs_queue_parameters", }, @@ -442,7 +442,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.sqs_queue_parameters", }, @@ -586,7 +586,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.activemq_broker_parameters", "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", - "source_parameters.0.managed_streaming_kafka", + "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbit_mq_broker", "source_parameters.0.self_managed_kafka", }, @@ -645,6 +645,10 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject.KinesisStreamParameters = expandPipeSourceKinesisStreamParameters(v[0].(map[string]interface{})) } + if v, ok := tfMap["managed_streaming_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ManagedStreamingKafkaParameters = expandPipeSourceManagedStreamingKafkaParameters(v[0].(map[string]interface{})) + } + // TODO if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -850,6 +854,64 @@ func expandPipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *type return apiObject } +func expandPipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceManagedStreamingKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceManagedStreamingKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["consumer_group_id"].(string); ok && v != "" { + apiObject.ConsumerGroupID = aws.String(v) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMSKAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.MSKStartPosition(v) + } + + if v, ok := tfMap["topic_name"].(string); ok && v != "" { + apiObject.TopicName = aws.String(v) + } + + return apiObject +} + +func expandMSKAccessCredentials(tfMap map[string]interface{}) types.MSKAccessCredentials { + if tfMap == nil { + return nil + } + + if v, ok := tfMap["client_certificate_tls_auth"].(string); ok && v != "" { + apiObject := &types.MSKAccessCredentialsMemberClientCertificateTlsAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_512_auth"].(string); ok && v != "" { + apiObject := &types.MSKAccessCredentialsMemberSaslScram512Auth{ + Value: v, + } + + return apiObject + } + + return nil +} + func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { if tfMap == nil { return nil @@ -905,6 +967,10 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeSourceKinesisStreamParameters(v)} } + if v := apiObject.ManagedStreamingKafkaParameters; v != nil { + 
tfMap["managed_streaming_kafka_parameters"] = []interface{}{flattenPipeSourceManagedStreamingKafkaParameters(v)} + } + // TODO if v := apiObject.SqsQueueParameters; v != nil { @@ -1080,6 +1146,62 @@ func flattenPipeSourceKinesisStreamParameters(apiObject *types.PipeSourceKinesis return tfMap } +func flattenPipeSourceManagedStreamingKafkaParameters(apiObject *types.PipeSourceManagedStreamingKafkaParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.ConsumerGroupID; v != nil { + tfMap["consumer_group_id"] = aws.ToString(v) + } + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMSKAccessCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v + } + + if v := apiObject.TopicName; v != nil { + tfMap["topic_name"] = aws.ToString(v) + } + + return tfMap +} + +func flattenMSKAccessCredentials(apiObject types.MSKAccessCredentials) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if apiObject, ok := apiObject.(*types.MSKAccessCredentialsMemberClientCertificateTlsAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["client_certificate_tls_auth"] = v + } + } + + if apiObject, ok := apiObject.(*types.MSKAccessCredentialsMemberSaslScram512Auth); ok { + if v := apiObject.Value; v != "" { + tfMap["sasl_scram_512_auth"] = v + } + } + + return tfMap +} + func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index c8be3852a97..dfd6b258f75 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -206,7 +206,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `dynamodb_stream_parameters` - (Optional) The parameters for using a DynamoDB stream as a source. Detailed below. * `filter_criteria` - (Optional) The collection of event patterns used to [filter events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below. * `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. -* `managed_streaming_kafka` - (Optional) The parameters for using an MSK stream as a source. Detailed below. +* `managed_streaming_kafka_parameters` - (Optional) The parameters for using an MSK stream as a source. Detailed below. * `rabbit_mq_broker` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below. * `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below. * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a source. Detailed below. @@ -261,16 +261,16 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `arn` - (Optional) The ARN of the Amazon SQS queue specified as the target for the dead-letter queue. 
-#### source_parameters.managed_streaming_kafka Configuration Block
+#### source_parameters.managed_streaming_kafka_parameters Configuration Block
 
 * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
 * `consumer_group_id` - (Optional) The ID of the consumer group to join. Maximum length of 200.
 * `credentials` - (Optional) The credentials needed to access the resource. Detailed below.
 * `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
 * `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST.
-* `topic` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
+* `topic_name` - (Required) The name of the topic that the pipe will read from. Maximum length of 249.
 
-##### source_parameters.managed_streaming_kafka.credentials Configuration Block
+##### source_parameters.managed_streaming_kafka_parameters.credentials Configuration Block
 
 * `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
 * `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.

From f37a79b11179c2f247e47615b70fee1babb34d9b Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 13 Jun 2023 10:34:07 -0400
Subject: [PATCH 19/65] r/aws_pipes_pipe: 'source_parameters.rabbit_mq_broker'
 -> 'source_parameters.rabbitmq_broker_parameters'.

---
 internal/service/pipes/source_parameters.go | 108 ++++++++++++++++----
 website/docs/r/pipes_pipe.html.markdown     |   8 +-
 2 files changed, 94 insertions(+), 22 deletions(-)

diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go
index ea31b3b123b..5d91d35bc92 100644
--- a/internal/service/pipes/source_parameters.go
+++ b/internal/service/pipes/source_parameters.go
@@ -30,7 +30,7 @@ func sourceParametersSchema() *schema.Schema {
 					"source_parameters.0.dynamodb_stream_parameters",
 					"source_parameters.0.kinesis_stream_parameters",
 					"source_parameters.0.managed_streaming_kafka_parameters",
-					"source_parameters.0.rabbit_mq_broker",
+					"source_parameters.0.rabbitmq_broker_parameters",
 					"source_parameters.0.self_managed_kafka",
 					"source_parameters.0.sqs_queue_parameters",
 				},
@@ -92,7 +92,7 @@ func sourceParametersSchema() *schema.Schema {
 					"source_parameters.0.activemq_broker_parameters",
 					"source_parameters.0.kinesis_stream_parameters",
 					"source_parameters.0.managed_streaming_kafka_parameters",
-					"source_parameters.0.rabbit_mq_broker",
+					"source_parameters.0.rabbitmq_broker_parameters",
 					"source_parameters.0.self_managed_kafka",
 					"source_parameters.0.sqs_queue_parameters",
 				},
@@ -200,7 +200,7 @@ func sourceParametersSchema() *schema.Schema {
 					"source_parameters.0.activemq_broker_parameters",
 					"source_parameters.0.dynamodb_stream_parameters",
 					"source_parameters.0.managed_streaming_kafka_parameters",
-					"source_parameters.0.rabbit_mq_broker",
+					"source_parameters.0.rabbitmq_broker_parameters",
 					"source_parameters.0.self_managed_kafka",
 					"source_parameters.0.sqs_queue_parameters",
 				},
@@ -290,7 +290,7 @@ func sourceParametersSchema() *schema.Schema {
 					"source_parameters.0.activemq_broker_parameters",
 					"source_parameters.0.dynamodb_stream_parameters",
 					"source_parameters.0.kinesis_stream_parameters",
-					"source_parameters.0.rabbit_mq_broker",
+					"source_parameters.0.rabbitmq_broker_parameters",
 					"source_parameters.0.self_managed_kafka",
"source_parameters.0.sqs_queue_parameters", }, @@ -363,7 +363,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "rabbit_mq_broker": { + "rabbitmq_broker_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -413,7 +413,7 @@ func sourceParametersSchema() *schema.Schema { return old == "0" }, }, - "queue": { + "queue_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -443,7 +443,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", - "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.rabbitmq_broker_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -587,7 +587,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", - "source_parameters.0.rabbit_mq_broker", + "source_parameters.0.rabbitmq_broker_parameters", "source_parameters.0.self_managed_kafka", }, Elem: &schema.Resource{ @@ -649,6 +649,10 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject.ManagedStreamingKafkaParameters = expandPipeSourceManagedStreamingKafkaParameters(v[0].(map[string]interface{})) } + if v, ok := tfMap["rabbitmq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RabbitMQBrokerParameters = expandPipeSourceRabbitMQBrokerParameters(v[0].(map[string]interface{})) + } + // TODO if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -736,7 +740,7 @@ func expandPipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *typ } if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.Credentials = expandMQBrokerAccessCredentialsMemberBasicAuth(v[0].(map[string]interface{})) + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) } if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { @@ -750,18 +754,20 @@ func expandPipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *typ return apiObject } -func expandMQBrokerAccessCredentialsMemberBasicAuth(tfMap map[string]interface{}) types.MQBrokerAccessCredentials { +func expandMQBrokerAccessCredentials(tfMap map[string]interface{}) types.MQBrokerAccessCredentials { if tfMap == nil { return nil } - apiObject := &types.MQBrokerAccessCredentialsMemberBasicAuth{} - if v, ok := tfMap["basic_auth"].(string); ok && v != "" { - apiObject.Value = v + apiObject := &types.MQBrokerAccessCredentialsMemberBasicAuth{ + Value: v, + } + + return apiObject } - return apiObject + return nil } func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.PipeSourceDynamoDBStreamParameters { @@ -912,6 +918,36 @@ func expandMSKAccessCredentials(tfMap map[string]interface{}) types.MSKAccessCre return nil } +func expandPipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *types.PipeSourceRabbitMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceRabbitMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = 
expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["queue_name"].(string); ok && v != "" { + apiObject.QueueName = aws.String(v) + } + + if v, ok := tfMap["virtual_host"].(string); ok && v != "" { + apiObject.VirtualHost = aws.String(v) + } + + return apiObject +} + func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { if tfMap == nil { return nil @@ -971,6 +1007,10 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap["managed_streaming_kafka_parameters"] = []interface{}{flattenPipeSourceManagedStreamingKafkaParameters(v)} } + if v := apiObject.RabbitMQBrokerParameters; v != nil { + tfMap["rabbitmq_broker_parameters"] = []interface{}{flattenPipeSourceRabbitMQBrokerParameters(v)} + } + // TODO if v := apiObject.SqsQueueParameters; v != nil { @@ -1030,7 +1070,7 @@ func flattenPipeSourceActiveMQBrokerParameters(apiObject *types.PipeSourceActive } if v := apiObject.Credentials; v != nil { - tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentialsMemberBasicAuth(v.(*types.MQBrokerAccessCredentialsMemberBasicAuth))} + tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentials(v)} } if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { @@ -1044,15 +1084,17 @@ func flattenPipeSourceActiveMQBrokerParameters(apiObject *types.PipeSourceActive return tfMap } -func flattenMQBrokerAccessCredentialsMemberBasicAuth(apiObject *types.MQBrokerAccessCredentialsMemberBasicAuth) map[string]interface{} { +func flattenMQBrokerAccessCredentials(apiObject types.MQBrokerAccessCredentials) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.Value; v != "" { - tfMap["basic_auth"] = v + if apiObject, ok := apiObject.(*types.MQBrokerAccessCredentialsMemberBasicAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["basic_auth"] = v + } } return tfMap @@ -1202,6 +1244,36 @@ func flattenMSKAccessCredentials(apiObject types.MSKAccessCredentials) map[strin return tfMap } +func flattenPipeSourceRabbitMQBrokerParameters(apiObject *types.PipeSourceRabbitMQBrokerParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) + } + + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenMQBrokerAccessCredentials(v)} + } + + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) + } + + if v := apiObject.QueueName; v != nil { + tfMap["queue_name"] = aws.ToString(v) + } + + if v := apiObject.VirtualHost; v != nil { + tfMap["virtual_host"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index dfd6b258f75..dc60a1d64eb 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -207,7 +207,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `filter_criteria` - (Optional) The collection of event patterns used to [filter 
events](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-filtering.html). Detailed below.
 * `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below.
 * `managed_streaming_kafka_parameters` - (Optional) The parameters for using an MSK stream as a source. Detailed below.
-* `rabbit_mq_broker` - (Optional) The parameters for using a RabbitMQ broker as a source. Detailed below.
+* `rabbitmq_broker_parameters` - (Optional) The parameters for using a RabbitMQ broker as a source. Detailed below.
 * `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below.
 * `sqs_queue_parameters` - (Optional) The parameters for using an Amazon SQS queue as a source. Detailed below.
 
@@ -275,15 +275,15 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https:
 * `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
 * `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials.
 
-#### source_parameters.rabbit_mq_broker Configuration Block
+#### source_parameters.rabbitmq_broker_parameters Configuration Block
 
 * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000.
 * `credentials` - (Required) The credentials needed to access the resource. Detailed below.
 * `maximum_batching_window_in_seconds` - (Optional) The maximum length of time to wait for events. Maximum value of 300.
-* `queue` - (Required) The name of the queue to consume messages from. Maximum length of 1000.
+* `queue_name` - (Required) The name of the queue to consume messages from. Maximum length of 1000.
 * `virtual_host` - (Optional) The name of the virtual host associated with the source broker. Maximum length of 200.
 
-##### source_parameters.rabbit_mq_broker.credentials Configuration Block
+##### source_parameters.rabbitmq_broker_parameters.credentials Configuration Block
 
 * `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the credentials.
 

From 0fb485808d267ac5d73c93d2d8572ab9b9cc577c Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 13 Jun 2023 11:33:54 -0400
Subject: [PATCH 20/65] r/aws_pipes_pipe:
 'source_parameters.self_managed_kafka' ->
 'source_parameters.self_managed_kafka_parameters'.
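
A minimal configuration sketch of the renamed block (the broker addresses,
topic name, and secret reference are placeholders, and the exact `source`
string format for a self-managed Kafka cluster is an assumption here, not
part of this change):

    resource "aws_pipes_pipe" "example" {
      name     = "example-pipe"
      role_arn = aws_iam_role.example.arn
      source   = "smk://test1:9092,test2:9092"
      target   = aws_sqs_queue.example.arn

      source_parameters {
        self_managed_kafka_parameters {
          # renamed from "servers"
          additional_bootstrap_servers = ["test1:9092"]
          # renamed from "topic"
          topic_name        = "example-topic"
          starting_position = "TRIM_HORIZON"

          credentials {
            basic_auth = aws_secretsmanager_secret_version.example.arn
          }
        }
      }
    }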
--- internal/service/pipes/source_parameters.go | 1058 ++++--------------- website/docs/r/pipes_pipe.html.markdown | 12 +- 2 files changed, 196 insertions(+), 874 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 5d91d35bc92..a184f00bff6 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -31,7 +32,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbitmq_broker_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -93,7 +94,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbitmq_broker_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -201,7 +202,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbitmq_broker_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -291,7 +292,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.rabbitmq_broker_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -372,7 +373,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.dynamodb_stream_parameters", "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", "source_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -434,7 +435,7 @@ func sourceParametersSchema() *schema.Schema { }, }, }, - "self_managed_kafka": { + "self_managed_kafka_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -448,6 +449,19 @@ func sourceParametersSchema() *schema.Schema { }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "additional_bootstrap_servers": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 300), + validation.StringMatch(regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}$`), ""), + ), + }, + }, "batch_size": { Type: schema.TypeInt, Optional: true, @@ -513,26 +527,13 @@ func 
sourceParametersSchema() *schema.Schema { Optional: true, ValidateFunc: verify.ValidARN, }, - "servers": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - MaxItems: 2, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 300), - validation.StringMatch(regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9]):[0-9]{1,5}$`), ""), - ), - }, - }, "starting_position": { Type: schema.TypeString, Optional: true, ForceNew: true, ValidateDiagFunc: enum.Validate[types.SelfManagedKafkaStartPosition](), }, - "topic": { + "topic_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -588,7 +589,7 @@ func sourceParametersSchema() *schema.Schema { "source_parameters.0.kinesis_stream_parameters", "source_parameters.0.managed_streaming_kafka_parameters", "source_parameters.0.rabbitmq_broker_parameters", - "source_parameters.0.self_managed_kafka", + "source_parameters.0.self_managed_kafka_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -653,7 +654,9 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP apiObject.RabbitMQBrokerParameters = expandPipeSourceRabbitMQBrokerParameters(v[0].(map[string]interface{})) } - // TODO + if v, ok := tfMap["self_managed_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SelfManagedKafkaParameters = expandPipeSourceSelfManagedKafkaParameters(v[0].(map[string]interface{})) + } if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.SqsQueueParameters = expandPipeSourceSqsQueueParameters(v[0].(map[string]interface{})) @@ -948,6 +951,110 @@ func expandPipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *typ return apiObject } +func expandPipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceSelfManagedKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeSourceSelfManagedKafkaParameters{} + + if v, ok := tfMap["additional_bootstrap_servers"].(*schema.Set); ok && v.Len() > 0 { + apiObject.AdditionalBootstrapServers = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["consumer_group_id"].(string); ok && v != "" { + apiObject.ConsumerGroupID = aws.String(v) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandSelfManagedKafkaAccessConfigurationCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok && v != 0 { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["server_root_ca_certificate"].(string); ok && v != "" { + apiObject.ServerRootCaCertificate = aws.String(v) + } + + if v, ok := tfMap["starting_position"].(string); ok && v != "" { + apiObject.StartingPosition = types.SelfManagedKafkaStartPosition(v) + } + + if v, ok := tfMap["topic_name"].(string); ok && v != "" { + apiObject.TopicName = aws.String(v) + } + + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVpc(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandSelfManagedKafkaAccessConfigurationCredentials(tfMap map[string]interface{}) 
types.SelfManagedKafkaAccessConfigurationCredentials { + if tfMap == nil { + return nil + } + + if v, ok := tfMap["basic_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["client_certificate_tls_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_256_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth{ + Value: v, + } + + return apiObject + } + + if v, ok := tfMap["sasl_scram_512_auth"].(string); ok && v != "" { + apiObject := &types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth{ + Value: v, + } + + return apiObject + } + + return nil +} + +func expandSelfManagedKafkaAccessConfigurationVpc(tfMap map[string]interface{}) *types.SelfManagedKafkaAccessConfigurationVpc { + if tfMap == nil { + return nil + } + + apiObject := &types.SelfManagedKafkaAccessConfigurationVpc{} + + if v, ok := tfMap["security_groups"].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroup = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["subnets"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Subnets = flex.ExpandStringValueSet(v) + } + + return apiObject +} + func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { if tfMap == nil { return nil @@ -1011,7 +1118,9 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri tfMap["rabbitmq_broker_parameters"] = []interface{}{flattenPipeSourceRabbitMQBrokerParameters(v)} } - // TODO + if v := apiObject.SelfManagedKafkaParameters; v != nil { + tfMap["self_managed_kafka_parameters"] = []interface{}{flattenPipeSourceSelfManagedKafkaParameters(v)} + } if v := apiObject.SqsQueueParameters; v != nil { tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSqsQueueParameters(v)} @@ -1274,919 +1383,132 @@ func flattenPipeSourceRabbitMQBrokerParameters(apiObject *types.PipeSourceRabbit return tfMap } -func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { +func flattenPipeSourceSelfManagedKafkaParameters(apiObject *types.PipeSourceSelfManagedKafkaParameters) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.BatchSize; v != nil { - tfMap["batch_size"] = aws.ToInt32(v) - } - - if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { - tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) - } - - return tfMap -} - -func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { - if apiObject == nil { - return nil - } - - tfMap := map[string]interface{}{} - - if v := apiObject.Arn; v != nil { - tfMap["arn"] = aws.ToString(v) - } - - return tfMap -} - -/* -func expandSourceParameters(config []interface{}) *types.PipeSourceParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceParameters - for _, c := range config { - param, ok := c.(map[string]interface{}) - if !ok { - return nil - } - - if val, ok := param["active_mq_broker"]; ok { - parameters.ActiveMQBrokerParameters = expandSourceActiveMQBrokerParameters(val.([]interface{})) - } - - if val, ok := param["dynamo_db_stream"]; ok { - parameters.DynamoDBStreamParameters = 
expandSourceDynamoDBStreamParameters(val.([]interface{})) - } - - if val, ok := param["kinesis_stream"]; ok { - parameters.KinesisStreamParameters = expandSourceKinesisStreamParameters(val.([]interface{})) - } - - if val, ok := param["managed_streaming_kafka"]; ok { - parameters.ManagedStreamingKafkaParameters = expandSourceManagedStreamingKafkaParameters(val.([]interface{})) - } - - if val, ok := param["rabbit_mq_broker"]; ok { - parameters.RabbitMQBrokerParameters = expandSourceRabbitMQBrokerParameters(val.([]interface{})) - } - - if val, ok := param["self_managed_kafka"]; ok { - parameters.SelfManagedKafkaParameters = expandSourceSelfManagedKafkaParameters(val.([]interface{})) - } - - if val, ok := param["sqs_queue"]; ok { - parameters.SqsQueueParameters = expandSourceSqsQueueParameters(val.([]interface{})) - } - - if val, ok := param["filter_criteria"]; ok { - parameters.FilterCriteria = expandSourceFilterCriteria(val.([]interface{})) - } - } - return ¶meters -} - -func expandSourceActiveMQBrokerParameters(config []interface{}) *types.PipeSourceActiveMQBrokerParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceActiveMQBrokerParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.QueueName = expandString("queue", param) - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - } - parameters.Credentials = &credentialsParameters - } - } - } - return ¶meters -} - -func expandSourceDynamoDBStreamParameters(config []interface{}) *types.PipeSourceDynamoDBStreamParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceDynamoDBStreamParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param) - parameters.ParallelizationFactor = expandInt32("parallelization_factor", param) - parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param) - startingPosition := expandStringValue("starting_position", param) - if startingPosition != "" { - parameters.StartingPosition = types.DynamoDBStreamStartPosition(startingPosition) - } - onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param) - if onPartialBatchItemFailure != "" { - parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure) - } - if val, ok := param["dead_letter_config"]; ok { - parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{})) - } - } - return ¶meters -} - -func expandSourceKinesisStreamParameters(config []interface{}) *types.PipeSourceKinesisStreamParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceKinesisStreamParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - 
parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param) - parameters.ParallelizationFactor = expandInt32("parallelization_factor", param) - parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param) - - startingPosition := expandStringValue("starting_position", param) - if startingPosition != "" { - parameters.StartingPosition = types.KinesisStreamStartPosition(startingPosition) - } - onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param) - if onPartialBatchItemFailure != "" { - parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure) - } - if val, ok := param["starting_position_timestamp"]; ok { - t, _ := time.Parse(time.RFC3339, val.(string)) - - parameters.StartingPositionTimestamp = aws.Time(t) - } - if val, ok := param["dead_letter_config"]; ok { - parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{})) - } - } - return ¶meters -} - -func expandSourceManagedStreamingKafkaParameters(config []interface{}) *types.PipeSourceManagedStreamingKafkaParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceManagedStreamingKafkaParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.TopicName = expandString("topic", param) - parameters.ConsumerGroupID = expandString("consumer_group_id", param) - - startingPosition := expandStringValue("starting_position", param) - if startingPosition != "" { - parameters.StartingPosition = types.MSKStartPosition(startingPosition) - } - - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - if _, ok := credentialsParam["client_certificate_tls_auth"]; ok { - var credentialsParameters types.MSKAccessCredentialsMemberClientCertificateTlsAuth - credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_512_auth"]; ok { - var credentialsParameters types.MSKAccessCredentialsMemberSaslScram512Auth - credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - } - } - } - } - return ¶meters -} - -func expandSourceRabbitMQBrokerParameters(config []interface{}) *types.PipeSourceRabbitMQBrokerParameters { - if len(config) == 0 { - return nil - } - - var parameters types.PipeSourceRabbitMQBrokerParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.QueueName = expandString("queue", param) - parameters.VirtualHost = expandString("virtual_host", param) - - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth - for _, cc := range credentialsConfig { - credentialsParam := 
cc.(map[string]interface{}) - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - } - parameters.Credentials = &credentialsParameters - } - } + if v := apiObject.AdditionalBootstrapServers; v != nil { + tfMap["additional_bootstrap_servers"] = v } - return ¶meters -} -func expandSourceSelfManagedKafkaParameters(config []interface{}) *types.PipeSourceSelfManagedKafkaParameters { - if len(config) == 0 { - return nil + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) } - var parameters types.PipeSourceSelfManagedKafkaParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.TopicName = expandString("topic", param) - parameters.ConsumerGroupID = expandString("consumer_group_id", param) - parameters.ServerRootCaCertificate = expandString("server_root_ca_certificate", param) - startingPosition := expandStringValue("starting_position", param) - if startingPosition != "" { - parameters.StartingPosition = types.SelfManagedKafkaStartPosition(startingPosition) - } - if value, ok := param["servers"]; ok && value.(*schema.Set).Len() > 0 { - parameters.AdditionalBootstrapServers = flex.ExpandStringValueSet(value.(*schema.Set)) - } - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - if _, ok := credentialsParam["basic_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["client_certificate_tls_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth - credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_512_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth - credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_256_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth - credentialsParameters.Value = expandStringValue("sasl_scram_256_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - } - } - } - - if val, ok := param["vpc"]; ok { - vpcConfig := val.([]interface{}) - if len(vpcConfig) != 0 { - var vpcParameters types.SelfManagedKafkaAccessConfigurationVpc - for _, vc := range vpcConfig { - vpcParam := vc.(map[string]interface{}) - if value, ok := vpcParam["security_groups"]; ok && value.(*schema.Set).Len() > 0 { - vpcParameters.SecurityGroup = flex.ExpandStringValueSet(value.(*schema.Set)) - } - if value, ok := vpcParam["subnets"]; ok && value.(*schema.Set).Len() > 0 { - vpcParameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set)) - } - } - parameters.Vpc = &vpcParameters - } - } + if v := apiObject.ConsumerGroupID; v != nil { + tfMap["consumer_group_id"] = aws.ToString(v) } - 
return ¶meters -} - -func expandSourceSqsQueueParameters(config []interface{}) *types.PipeSourceSqsQueueParameters { - if len(config) == 0 { - return nil + if v := apiObject.Credentials; v != nil { + tfMap["credentials"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationCredentials(v)} } - var parameters types.PipeSourceSqsQueueParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) } - return ¶meters -} - -func expandSourceDeadLetterConfig(config []interface{}) *types.DeadLetterConfig { - if len(config) == 0 { - return nil + if v := apiObject.ServerRootCaCertificate; v != nil { + tfMap["server_root_ca_certificate"] = aws.ToString(v) } - var parameters types.DeadLetterConfig - for _, c := range config { - param := c.(map[string]interface{}) - parameters.Arn = expandString("arn", param) + if v := apiObject.StartingPosition; v != "" { + tfMap["starting_position"] = v } - return ¶meters -} - -func expandSourceFilterCriteria(config []interface{}) *types.FilterCriteria { - if len(config) == 0 { - return nil + if v := apiObject.TopicName; v != nil { + tfMap["topic_name"] = aws.ToString(v) } - var parameters types.FilterCriteria - for _, c := range config { - param := c.(map[string]interface{}) - if val, ok := param["filter"]; ok { - filtersConfig := val.([]interface{}) - var filters []types.Filter - for _, f := range filtersConfig { - filterParam := f.(map[string]interface{}) - pattern := expandString("pattern", filterParam) - if pattern != nil { - filters = append(filters, types.Filter{ - Pattern: pattern, - }) - } - } - if len(filters) > 0 { - parameters.Filters = filters - } - } + if v := apiObject.Vpc; v != nil { + tfMap["vpc"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationVpc(v)} } - return ¶meters + return tfMap } -func flattenSourceParameters(sourceParameters *types.PipeSourceParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if sourceParameters.ActiveMQBrokerParameters != nil { - config["active_mq_broker"] = flattenSourceActiveMQBrokerParameters(sourceParameters.ActiveMQBrokerParameters) - } - - if sourceParameters.DynamoDBStreamParameters != nil { - config["dynamo_db_stream"] = flattenSourceDynamoDBStreamParameters(sourceParameters.DynamoDBStreamParameters) - } - - if sourceParameters.KinesisStreamParameters != nil { - config["kinesis_stream"] = flattenSourceKinesisStreamParameters(sourceParameters.KinesisStreamParameters) - } - - if sourceParameters.ManagedStreamingKafkaParameters != nil { - config["managed_streaming_kafka"] = flattenSourceManagedStreamingKafkaParameters(sourceParameters.ManagedStreamingKafkaParameters) - } - - if sourceParameters.RabbitMQBrokerParameters != nil { - config["rabbit_mq_broker"] = flattenSourceRabbitMQBrokerParameters(sourceParameters.RabbitMQBrokerParameters) - } - - if sourceParameters.SelfManagedKafkaParameters != nil { - config["self_managed_kafka"] = flattenSourceSelfManagedKafkaParameters(sourceParameters.SelfManagedKafkaParameters) - } - - if sourceParameters.SqsQueueParameters != nil { - config["sqs_queue"] = flattenSourceSqsQueueParameters(sourceParameters.SqsQueueParameters) - } - - if sourceParameters.FilterCriteria != nil { - criteria := 
flattenSourceFilterCriteria(sourceParameters.FilterCriteria) - if len(criteria) > 0 { - config["filter_criteria"] = criteria - } - } - - if len(config) == 0 { +func flattenSelfManagedKafkaAccessConfigurationCredentials(apiObject types.SelfManagedKafkaAccessConfigurationCredentials) map[string]interface{} { + if apiObject == nil { return nil } - result := []map[string]interface{}{config} - return result -} - -func flattenSourceActiveMQBrokerParameters(parameters *types.PipeSourceActiveMQBrokerParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.QueueName != nil { - config["queue"] = aws.ToString(parameters.QueueName) - } - if parameters.Credentials != nil { - credentialsConfig := make(map[string]interface{}) - switch v := parameters.Credentials.(type) { - case *types.MQBrokerAccessCredentialsMemberBasicAuth: - credentialsConfig["basic_auth"] = v.Value - } - config["credentials"] = []map[string]interface{}{credentialsConfig} - } - - result := []map[string]interface{}{config} - return result -} - -func flattenSourceDynamoDBStreamParameters(parameters *types.PipeSourceDynamoDBStreamParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.MaximumRecordAgeInSeconds != nil { - config["maximum_record_age_in_seconds"] = aws.ToInt32(parameters.MaximumRecordAgeInSeconds) - } - if parameters.ParallelizationFactor != nil { - config["parallelization_factor"] = aws.ToInt32(parameters.ParallelizationFactor) - } - if parameters.MaximumRetryAttempts != nil { - config["maximum_retry_attempts"] = aws.ToInt32(parameters.MaximumRetryAttempts) - } - if parameters.StartingPosition != "" { - config["starting_position"] = parameters.StartingPosition - } - if parameters.OnPartialBatchItemFailure != "" { - config["on_partial_batch_item_failure"] = parameters.OnPartialBatchItemFailure - } - if parameters.DeadLetterConfig != nil { - config["dead_letter_config"] = flattenSourceDeadLetterConfig(parameters.DeadLetterConfig) - } - - result := []map[string]interface{}{config} - return result -} - -func flattenSourceKinesisStreamParameters(parameters *types.PipeSourceKinesisStreamParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.MaximumRecordAgeInSeconds != nil { - config["maximum_record_age_in_seconds"] = aws.ToInt32(parameters.MaximumRecordAgeInSeconds) - } - if parameters.ParallelizationFactor != nil { - config["parallelization_factor"] = aws.ToInt32(parameters.ParallelizationFactor) - } - if parameters.MaximumRetryAttempts != nil { - 
config["maximum_retry_attempts"] = aws.ToInt32(parameters.MaximumRetryAttempts) - } - if parameters.StartingPosition != "" { - config["starting_position"] = parameters.StartingPosition - } - if parameters.OnPartialBatchItemFailure != "" { - config["on_partial_batch_item_failure"] = parameters.OnPartialBatchItemFailure - } - if parameters.StartingPositionTimestamp != nil { - config["starting_position_timestamp"] = aws.ToTime(parameters.StartingPositionTimestamp).Format(time.RFC3339) - } - if parameters.DeadLetterConfig != nil { - config["dead_letter_config"] = flattenSourceDeadLetterConfig(parameters.DeadLetterConfig) - } - - result := []map[string]interface{}{config} - return result -} - -func flattenSourceManagedStreamingKafkaParameters(parameters *types.PipeSourceManagedStreamingKafkaParameters) []map[string]interface{} { - config := make(map[string]interface{}) + tfMap := map[string]interface{}{} - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.ConsumerGroupID != nil { - config["consumer_group_id"] = aws.ToString(parameters.ConsumerGroupID) - } - if parameters.StartingPosition != "" { - config["starting_position"] = parameters.StartingPosition - } - if parameters.TopicName != nil { - config["topic"] = aws.ToString(parameters.TopicName) - } - if parameters.Credentials != nil { - credentialsConfig := make(map[string]interface{}) - switch v := parameters.Credentials.(type) { - case *types.MSKAccessCredentialsMemberClientCertificateTlsAuth: - credentialsConfig["client_certificate_tls_auth"] = v.Value - case *types.MSKAccessCredentialsMemberSaslScram512Auth: - credentialsConfig["sasl_scram_512_auth"] = v.Value + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["basic_auth"] = v } - config["credentials"] = []map[string]interface{}{credentialsConfig} } - result := []map[string]interface{}{config} - return result -} - -func flattenSourceRabbitMQBrokerParameters(parameters *types.PipeSourceRabbitMQBrokerParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.QueueName != nil { - config["queue"] = aws.ToString(parameters.QueueName) - } - if parameters.VirtualHost != nil { - config["virtual_host"] = aws.ToString(parameters.VirtualHost) - } - if parameters.Credentials != nil { - credentialsConfig := make(map[string]interface{}) - switch v := parameters.Credentials.(type) { - case *types.MQBrokerAccessCredentialsMemberBasicAuth: - credentialsConfig["basic_auth"] = v.Value + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth); ok { + if v := apiObject.Value; v != "" { + tfMap["client_certificate_tls_auth"] = v } - config["credentials"] = []map[string]interface{}{credentialsConfig} } - result := []map[string]interface{}{config} - return result -} - -func 
flattenSourceSelfManagedKafkaParameters(parameters *types.PipeSourceSelfManagedKafkaParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) - } - if parameters.ConsumerGroupID != nil { - config["consumer_group_id"] = aws.ToString(parameters.ConsumerGroupID) - } - if parameters.StartingPosition != "" { - config["starting_position"] = parameters.StartingPosition - } - if parameters.TopicName != nil { - config["topic"] = aws.ToString(parameters.TopicName) - } - if parameters.AdditionalBootstrapServers != nil { - config["servers"] = flex.FlattenStringValueSet(parameters.AdditionalBootstrapServers) - } - if parameters.ServerRootCaCertificate != nil { - config["server_root_ca_certificate"] = aws.ToString(parameters.ServerRootCaCertificate) - } - - if parameters.Credentials != nil { - credentialsConfig := make(map[string]interface{}) - switch v := parameters.Credentials.(type) { - case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth: - credentialsConfig["basic_auth"] = v.Value - case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth: - credentialsConfig["client_certificate_tls_auth"] = v.Value - case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth: - credentialsConfig["sasl_scram_256_auth"] = v.Value - case *types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth: - credentialsConfig["sasl_scram_512_auth"] = v.Value + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth); ok { + if v := apiObject.Value; v != "" { + tfMap["sasl_scram_256_auth"] = v } - config["credentials"] = []map[string]interface{}{credentialsConfig} - } - if parameters.Vpc != nil { - vpcConfig := make(map[string]interface{}) - vpcConfig["security_groups"] = flex.FlattenStringValueSet(parameters.Vpc.SecurityGroup) - vpcConfig["subnets"] = flex.FlattenStringValueSet(parameters.Vpc.Subnets) - config["vpc"] = []map[string]interface{}{vpcConfig} - } - - result := []map[string]interface{}{config} - return result -} - -func flattenSourceSqsQueueParameters(parameters *types.PipeSourceSqsQueueParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.BatchSize != nil { - config["batch_size"] = aws.ToInt32(parameters.BatchSize) - } - if parameters.MaximumBatchingWindowInSeconds != nil && aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) != 0 { - config["maximum_batching_window_in_seconds"] = aws.ToInt32(parameters.MaximumBatchingWindowInSeconds) } - result := []map[string]interface{}{config} - return result -} - -func flattenSourceFilterCriteria(parameters *types.FilterCriteria) []map[string]interface{} { - config := make(map[string]interface{}) - - if len(parameters.Filters) != 0 { - var filters []map[string]interface{} - for _, filter := range parameters.Filters { - pattern := make(map[string]interface{}) - pattern["pattern"] = aws.ToString(filter.Pattern) - filters = append(filters, pattern) - } - if len(filters) != 0 { - config["filter"] = filters + if apiObject, ok := apiObject.(*types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth); ok { + if v := apiObject.Value; v != "" { 
+ tfMap["sasl_scram_512_auth"] = v } } - result := []map[string]interface{}{config} - return result -} - -func flattenSourceDeadLetterConfig(parameters *types.DeadLetterConfig) []map[string]interface{} { - if parameters == nil { - return nil - } - - config := make(map[string]interface{}) - if parameters.Arn != nil { - config["arn"] = aws.ToString(parameters.Arn) - } - - result := []map[string]interface{}{config} - return result + return tfMap } -func expandSourceUpdateParameters(config []interface{}) *types.UpdatePipeSourceParameters { - if len(config) == 0 { +func flattenSelfManagedKafkaAccessConfigurationVpc(apiObject *types.SelfManagedKafkaAccessConfigurationVpc) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.UpdatePipeSourceParameters - for _, c := range config { - param, ok := c.(map[string]interface{}) - if !ok { - return nil - } - - if val, ok := param["active_mq_broker"]; ok { - parameters.ActiveMQBrokerParameters = expandSourceUpdateActiveMQBrokerParameters(val.([]interface{})) - } - - if val, ok := param["dynamo_db_stream"]; ok { - parameters.DynamoDBStreamParameters = expandSourceUpdateDynamoDBStreamParameters(val.([]interface{})) - } - - if val, ok := param["kinesis_stream"]; ok { - parameters.KinesisStreamParameters = expandSourceUpdateKinesisStreamParameters(val.([]interface{})) - } - - if val, ok := param["managed_streaming_kafka"]; ok { - parameters.ManagedStreamingKafkaParameters = expandSourceUpdateManagedStreamingKafkaParameters(val.([]interface{})) - } - - if val, ok := param["rabbit_mq_broker"]; ok { - parameters.RabbitMQBrokerParameters = expandSourceUpdateRabbitMQBrokerParameters(val.([]interface{})) - } - - if val, ok := param["self_managed_kafka"]; ok { - parameters.SelfManagedKafkaParameters = expandSourceUpdateSelfManagedKafkaParameters(val.([]interface{})) - } - - if val, ok := param["sqs_queue"]; ok { - parameters.SqsQueueParameters = expandSourceUpdateSqsQueueParameters(val.([]interface{})) - } - - if val, ok := param["filter_criteria"]; ok { - parameters.FilterCriteria = expandSourceFilterCriteria(val.([]interface{})) - } - } - return ¶meters -} - -func expandSourceUpdateActiveMQBrokerParameters(config []interface{}) *types.UpdatePipeSourceActiveMQBrokerParameters { - if len(config) == 0 { - return nil - } + tfMap := map[string]interface{}{} - var parameters types.UpdatePipeSourceActiveMQBrokerParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - } - parameters.Credentials = &credentialsParameters - } - } + if v := apiObject.SecurityGroup; v != nil { + tfMap["security_groups"] = v } - return ¶meters -} -func expandSourceUpdateDynamoDBStreamParameters(config []interface{}) *types.UpdatePipeSourceDynamoDBStreamParameters { - if len(config) == 0 { - return nil + if v := apiObject.Subnets; v != nil { + tfMap["subnets"] = v } - var parameters types.UpdatePipeSourceDynamoDBStreamParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = 
expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param) - parameters.ParallelizationFactor = expandInt32("parallelization_factor", param) - parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param) - onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param) - if onPartialBatchItemFailure != "" { - parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure) - } - if val, ok := param["dead_letter_config"]; ok { - parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{})) - } - } - return ¶meters + return tfMap } -func expandSourceUpdateKinesisStreamParameters(config []interface{}) *types.UpdatePipeSourceKinesisStreamParameters { - if len(config) == 0 { +func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.UpdatePipeSourceKinesisStreamParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.MaximumRecordAgeInSeconds = expandInt32("maximum_record_age_in_seconds", param) - parameters.ParallelizationFactor = expandInt32("parallelization_factor", param) - parameters.MaximumRetryAttempts = expandInt32("maximum_retry_attempts", param) - - onPartialBatchItemFailure := expandStringValue("on_partial_batch_item_failure", param) - if onPartialBatchItemFailure != "" { - parameters.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(onPartialBatchItemFailure) - } - if val, ok := param["dead_letter_config"]; ok { - parameters.DeadLetterConfig = expandSourceDeadLetterConfig(val.([]interface{})) - } - } - return ¶meters -} - -func expandSourceUpdateManagedStreamingKafkaParameters(config []interface{}) *types.UpdatePipeSourceManagedStreamingKafkaParameters { - if len(config) == 0 { - return nil - } + tfMap := map[string]interface{}{} - var parameters types.UpdatePipeSourceManagedStreamingKafkaParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - if _, ok := credentialsParam["client_certificate_tls_auth"]; ok { - var credentialsParameters types.MSKAccessCredentialsMemberClientCertificateTlsAuth - credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_512_auth"]; ok { - var credentialsParameters types.MSKAccessCredentialsMemberSaslScram512Auth - credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - } - } - } + if v := apiObject.BatchSize; v != nil { + tfMap["batch_size"] = aws.ToInt32(v) } - return ¶meters -} -func expandSourceUpdateRabbitMQBrokerParameters(config []interface{}) 
*types.UpdatePipeSourceRabbitMQBrokerParameters { - if len(config) == 0 { - return nil + if v := apiObject.MaximumBatchingWindowInSeconds; v != nil { + tfMap["maximum_batching_window_in_seconds"] = aws.ToInt32(v) } - var parameters types.UpdatePipeSourceRabbitMQBrokerParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - var credentialsParameters types.MQBrokerAccessCredentialsMemberBasicAuth - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - } - parameters.Credentials = &credentialsParameters - } - } - } - return ¶meters + return tfMap } -func expandSourceUpdateSelfManagedKafkaParameters(config []interface{}) *types.UpdatePipeSourceSelfManagedKafkaParameters { - if len(config) == 0 { +func flattenDeadLetterConfig(apiObject *types.DeadLetterConfig) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.UpdatePipeSourceSelfManagedKafkaParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) - parameters.ServerRootCaCertificate = expandString("server_root_ca_certificate", param) - - if val, ok := param["credentials"]; ok { - credentialsConfig := val.([]interface{}) - if len(credentialsConfig) != 0 { - for _, cc := range credentialsConfig { - credentialsParam := cc.(map[string]interface{}) - if _, ok := credentialsParam["basic_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberBasicAuth - credentialsParameters.Value = expandStringValue("basic_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["client_certificate_tls_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberClientCertificateTlsAuth - credentialsParameters.Value = expandStringValue("client_certificate_tls_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_512_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram512Auth - credentialsParameters.Value = expandStringValue("sasl_scram_512_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - if _, ok := credentialsParam["sasl_scram_256_auth"]; ok { - var credentialsParameters types.SelfManagedKafkaAccessConfigurationCredentialsMemberSaslScram256Auth - credentialsParameters.Value = expandStringValue("sasl_scram_256_auth", credentialsParam) - parameters.Credentials = &credentialsParameters - } - } - } - } - - if val, ok := param["vpc"]; ok { - vpcConfig := val.([]interface{}) - if len(vpcConfig) != 0 { - var vpcParameters types.SelfManagedKafkaAccessConfigurationVpc - for _, vc := range vpcConfig { - vpcParam := vc.(map[string]interface{}) - if value, ok := vpcParam["security_groups"]; ok && value.(*schema.Set).Len() > 0 { - vpcParameters.SecurityGroup = flex.ExpandStringValueSet(value.(*schema.Set)) - } - if value, ok := vpcParam["subnets"]; ok && 
value.(*schema.Set).Len() > 0 { - vpcParameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set)) - } - } - parameters.Vpc = &vpcParameters - } - } - } - - return ¶meters -} -func expandSourceUpdateSqsQueueParameters(config []interface{}) *types.UpdatePipeSourceSqsQueueParameters { - if len(config) == 0 { - return nil - } + tfMap := map[string]interface{}{} - var parameters types.UpdatePipeSourceSqsQueueParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.BatchSize = expandInt32("batch_size", param) - parameters.MaximumBatchingWindowInSeconds = expandInt32("maximum_batching_window_in_seconds", param) + if v := apiObject.Arn; v != nil { + tfMap["arn"] = aws.ToString(v) } - return ¶meters + return tfMap } -*/ diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index dc60a1d64eb..4edf1c61745 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -208,7 +208,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `managed_streaming_kafka_parameters` - (Optional) The parameters for using an MSK stream as a source. Detailed below. * `rabbitmq_broker_parameters` - (Optional) The parameters for using a Rabbit MQ broker as a source. Detailed below. -* `self_managed_kafka` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below. +* `self_managed_kafka_parameters` - (Optional) The parameters for using a self-managed Apache Kafka stream as a source. Detailed below. * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a source. Detailed below. #### source_parameters.filter_criteria Configuration Block @@ -287,26 +287,26 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: * `basic_auth` - (Required) The ARN of the Secrets Manager secret containing the credentials. -#### source_parameters.self_managed_kafka Configuration Block +#### source_parameters.self_managed_kafka_parameters Configuration Block -* `servers` - (Optional) An array of server URLs. Maximum number of 2 items, each of maximum length 300. +* `additional_bootstrap_servers` - (Optional) An array of server URLs. Maximum number of 2 items, each of maximum length 300. * `batch_size` - (Optional) The maximum number of records to include in each batch. Maximum value of 10000. * `consumer_group_id` - (Optional) The name of the destination queue to consume. Maximum value of 200. * `credentials` - (Optional) The credentials needed to access the resource. Detailed below. * `maximum_batching_window_in_seconds` - (Optional) The maximum length of a time to wait for events. Maximum value of 300. * `server_root_ca_certificate` - (Optional) The ARN of the Secrets Manager secret used for certification. * `starting_position` - (Optional) The position in a stream from which to start reading. Valid values: TRIM_HORIZON, LATEST. -* `topic` - (Required) The name of the topic that the pipe will read from. Maximum length of 249. +* `topic_name` - (Required) The name of the topic that the pipe will read from. Maximum length of 249. * `vpc` - (Optional) This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used. Detailed below. 
-##### source_parameters.self_managed_kafka.credentials Configuration Block +##### source_parameters.self_managed_kafka_parameters.credentials Configuration Block * `basic_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials. * `client_certificate_tls_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials. * `sasl_scram_256_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials. * `sasl_scram_512_auth` - (Optional) The ARN of the Secrets Manager secret containing the credentials. -##### source_parameters.self_managed_kafka.vpc Configuration Block +##### source_parameters.self_managed_kafka_parameters.vpc Configuration Block * `security_groups` - (Optional) List of security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. * `subnets` - (Optional) List of the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets. From 85b006013127f85559bf8d883c977e9c4e9de4bd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 11:49:40 -0400 Subject: [PATCH 21/65] r/aws_pipes_pipe: Fill out 'expandUpdatePipeSourceParameters'. --- internal/service/pipes/source_parameters.go | 236 +++++++++++++++++++- 1 file changed, 231 insertions(+), 5 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index a184f00bff6..4d8ad82f51a 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -672,7 +672,37 @@ func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.Updat apiObject := &types.UpdatePipeSourceParameters{} - // TODO + if v, ok := tfMap["activemq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ActiveMQBrokerParameters = expandUpdatePipeSourceActiveMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["dynamodb_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DynamoDBStreamParameters = expandUpdatePipeSourceDynamoDBStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandUpdatePipeSourceKinesisStreamParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["managed_streaming_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ManagedStreamingKafkaParameters = expandUpdatePipeSourceManagedStreamingKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["rabbitmq_broker_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RabbitMQBrokerParameters = expandUpdatePipeSourceRabbitMQBrokerParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["self_managed_kafka_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SelfManagedKafkaParameters = expandUpdatePipeSourceSelfManagedKafkaParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + 
apiObject.SqsQueueParameters = expandUpdatePipeSourceSqsQueueParameters(v[0].(map[string]interface{})) + } return apiObject } @@ -757,6 +787,28 @@ func expandPipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *typ return apiObject } +func expandUpdatePipeSourceActiveMQBrokerParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceActiveMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceActiveMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + func expandMQBrokerAccessCredentials(tfMap map[string]interface{}) types.MQBrokerAccessCredentials { if tfMap == nil { return nil @@ -815,6 +867,60 @@ func expandPipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *typ return apiObject } +func expandUpdatePipeSourceDynamoDBStreamParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceDynamoDBStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceDynamoDBStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } else { + apiObject.DeadLetterConfig = &types.DeadLetterConfig{} + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + return apiObject +} + +func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { + if tfMap == nil { + return nil + } + + apiObject := &types.DeadLetterConfig{} + + if v, ok := tfMap["arn"].(string); ok && v != "" { + apiObject.Arn = aws.String(v) + } + + return apiObject +} + func expandPipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeSourceKinesisStreamParameters { if tfMap == nil { return nil @@ -863,6 +969,46 @@ func expandPipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *type return apiObject } +func expandUpdatePipeSourceKinesisStreamParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceKinesisStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceKinesisStreamParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["dead_letter_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.DeadLetterConfig = expandDeadLetterConfig(v[0].(map[string]interface{})) + } else { + 
apiObject.DeadLetterConfig = &types.DeadLetterConfig{} + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_record_age_in_seconds"].(int); ok { + apiObject.MaximumRecordAgeInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_retry_attempts"].(int); ok { + apiObject.MaximumRetryAttempts = aws.Int32(int32(v)) + } + + if v, ok := tfMap["on_partial_batch_item_failure"].(string); ok { + apiObject.OnPartialBatchItemFailure = types.OnPartialBatchItemFailureStreams(v) + } + + if v, ok := tfMap["parallelization_factor"].(int); ok { + apiObject.ParallelizationFactor = aws.Int32(int32(v)) + } + + return apiObject +} + func expandPipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceManagedStreamingKafkaParameters { if tfMap == nil { return nil @@ -897,6 +1043,28 @@ func expandPipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{ return apiObject } +func expandUpdatePipeSourceManagedStreamingKafkaParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceManagedStreamingKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceManagedStreamingKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMSKAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + func expandMSKAccessCredentials(tfMap map[string]interface{}) types.MSKAccessCredentials { if tfMap == nil { return nil @@ -951,6 +1119,28 @@ func expandPipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *typ return apiObject } +func expandUpdatePipeSourceRabbitMQBrokerParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceRabbitMQBrokerParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceRabbitMQBrokerParameters{} + + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandMQBrokerAccessCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + return apiObject +} + func expandPipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *types.PipeSourceSelfManagedKafkaParameters { if tfMap == nil { return nil @@ -997,6 +1187,38 @@ func expandPipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *t return apiObject } +func expandUpdatePipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSelfManagedKafkaParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.UpdatePipeSourceSelfManagedKafkaParameters{} + + if v, ok := tfMap["batch_size"].(int); ok && v != 0 { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["credentials"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Credentials = expandSelfManagedKafkaAccessConfigurationCredentials(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { 
+ apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) + } + + if v, ok := tfMap["server_root_ca_certificate"].(string); ok { + apiObject.ServerRootCaCertificate = aws.String(v) + } + + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVpc(v[0].(map[string]interface{})) + } else { + apiObject.Vpc = &types.SelfManagedKafkaAccessConfigurationVpc{} + } + + return apiObject +} + func expandSelfManagedKafkaAccessConfigurationCredentials(tfMap map[string]interface{}) types.SelfManagedKafkaAccessConfigurationCredentials { if tfMap == nil { return nil @@ -1073,15 +1295,19 @@ func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.Pip return apiObject } -func expandDeadLetterConfig(tfMap map[string]interface{}) *types.DeadLetterConfig { +func expandUpdatePipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSqsQueueParameters { if tfMap == nil { return nil } - apiObject := &types.DeadLetterConfig{} + apiObject := &types.UpdatePipeSourceSqsQueueParameters{} - if v, ok := tfMap["arn"].(string); ok && v != "" { - apiObject.Arn = aws.String(v) + if v, ok := tfMap["batch_size"].(int); ok { + apiObject.BatchSize = aws.Int32(int32(v)) + } + + if v, ok := tfMap["maximum_batching_window_in_seconds"].(int); ok { + apiObject.MaximumBatchingWindowInSeconds = aws.Int32(int32(v)) } return apiObject From b2363d3efe3260983c559a852cc3ca60db3d160f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 14:39:45 -0400 Subject: [PATCH 22/65] Update CHANGELOG entry. --- .changelog/31607.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 9f41fdcdf29..e9f02888184 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -3,7 +3,7 @@ resource/aws_pipes_pipe: Add `enrichment_parameters` argument ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `active_mq_broker`, `dynamo_db_stream`, `kinesis_stream`, `managed_streaming_kafka`, `rabbit_mq_broker`, `self_managed_kafka` and `sqs_queue` attributes to the `source_parameters` configuration block +resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_parameters`, `kinesis_stream_parameters`, `managed_streaming_kafka_parameters`, `rabbitmq_broker_parameters`, `self_managed_kafka_parameters` and `sqs_queue_parameters` attributes to the `source_parameters` configuration block ``` ```release-note:enhancement From 9c7c8c16b0e10107e28f50c2b68670cf19582704 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 14:47:25 -0400 Subject: [PATCH 23/65] r/aws_pipes_pipe: 'target_parameters.step_function' -> 'target_parameters.step_function_state_machine_parameters'. 
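
For reviewers, an illustrative unit-test sketch of the round trip this rename wires up (not part of the diff below; the test name is hypothetical, the unexported expand/flatten helpers are the ones added in this change, and `testify` was added to the module earlier in the series):

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
	"github.com/stretchr/testify/assert"
)

// The "step_function_state_machine_parameters" block carries a single
// argument, "invocation_type"; it should survive an expand/flatten round trip.
func TestStateMachineParametersRoundTrip(t *testing.T) {
	tfMap := map[string]interface{}{
		"invocation_type": "FIRE_AND_FORGET",
	}

	apiObject := expandPipeTargetStateMachineParameters(tfMap)
	assert.Equal(t, types.PipeTargetInvocationType("FIRE_AND_FORGET"), apiObject.InvocationType)

	// Note: the flattener stores the typed enum value, not a plain string.
	flat := flattenPipeTargetStateMachineParameters(apiObject)
	assert.Equal(t, types.PipeTargetInvocationType("FIRE_AND_FORGET"), flat["invocation_type"])
}
```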
--- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 58 ++++++++++++++++----- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 48 insertions(+), 16 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index e9f02888184..ef6ceecba34 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue` and `step_function` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index c44a9307e01..cd43017caae 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -33,7 +33,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -178,7 +178,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -212,7 +212,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -545,7 +545,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -603,7 +603,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -672,7 +672,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -698,7 +698,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.redshift_data", 
"target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -724,7 +724,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.sagemaker_pipeline", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -778,7 +778,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sqs_queue", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -821,7 +821,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.step_function", + "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -838,7 +838,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "step_function": { + "step_function_state_machine_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -876,7 +876,23 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP apiObject := &types.PipeTargetParameters{} - // ... nested attribute handling ... + if v, ok := tfMap["step_function_state_machine_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.StepFunctionStateMachineParameters = expandPipeTargetStateMachineParameters(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandPipeTargetStateMachineParameters(tfMap map[string]interface{}) *types.PipeTargetStateMachineParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetStateMachineParameters{} + + if v, ok := tfMap["invocation_type"].(string); ok && v != "" { + apiObject.InvocationType = types.PipeTargetInvocationType(v) + } return apiObject } @@ -888,7 +904,23 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri tfMap := map[string]interface{}{} - // ... nested attribute handling ... + if v := apiObject.StepFunctionStateMachineParameters; v != nil { + tfMap["step_function_state_machine_parameters"] = []interface{}{flattenPipeTargetStateMachineParameters(v)} + } + + return tfMap +} + +func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMachineParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v + } return tfMap } diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 4edf1c61745..57742030ca5 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -331,7 +331,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `redshift_data` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. 
* `sagemaker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. * `sqs_queue` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below. -* `step_function` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. +* `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. #### target_parameters.batch_target Configuration Block @@ -532,7 +532,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `message_deduplication_id` - (Optional) This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages. * `message_group_id` - (Optional) The FIFO message group ID to use as the target. -#### target_parameters.step_function Configuration Block +#### target_parameters.step_function_state_machine_parameters Configuration Block * `invocation_type` - (Optional) Specify whether to invoke the function synchronously or asynchronously. Valid Values: REQUEST_RESPONSE, FIRE_AND_FORGET. From ef71c23340d651abbdb5d2d5ca00849ad314e2eb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 14:53:45 -0400 Subject: [PATCH 24/65] r/aws_pipes_pipe: 'target_parameters.sqs_queue' -> 'target_parameters.sqs_queue_parameters'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 70 +++++++++++++++++---- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 62 insertions(+), 14 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index ef6ceecba34..cab94a789fc 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index cd43017caae..f4452397e73 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -32,7 +32,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -177,7 +177,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -211,7 +211,7 @@ func targetParametersSchema() 
*schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -544,7 +544,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -602,7 +602,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -671,7 +671,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -697,7 +697,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -723,7 +723,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -777,7 +777,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -807,7 +807,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "sqs_queue": { + "sqs_queue_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -852,7 +852,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", "target_parameters.0.sagemaker_pipeline", - "target_parameters.0.sqs_queue", + "target_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -876,6 +876,12 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP apiObject := &types.PipeTargetParameters{} + // TODO + + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SqsQueueParameters = expandPipeTargetSqsQueueParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["step_function_state_machine_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.StepFunctionStateMachineParameters = expandPipeTargetStateMachineParameters(v[0].(map[string]interface{})) } @@ -883,6 +889,24 @@ func 
expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetSqsQueueParameters{} + + if v, ok := tfMap["message_deduplication_id"].(string); ok { + apiObject.MessageDeduplicationId = aws.String(v) + } + + if v, ok := tfMap["message_group_id"].(string); ok { + apiObject.MessageGroupId = aws.String(v) + } + + return apiObject +} + func expandPipeTargetStateMachineParameters(tfMap map[string]interface{}) *types.PipeTargetStateMachineParameters { if tfMap == nil { return nil @@ -904,6 +928,12 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri tfMap := map[string]interface{}{} + // TODO + + if v := apiObject.SqsQueueParameters; v != nil { + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSqsQueueParameters(v)} + } + if v := apiObject.StepFunctionStateMachineParameters; v != nil { tfMap["step_function_state_machine_parameters"] = []interface{}{flattenPipeTargetStateMachineParameters(v)} } @@ -911,6 +941,24 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetSqsQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.MessageDeduplicationId; v != nil { + tfMap["message_deduplication_id"] = aws.ToString(v) + } + + if v := apiObject.MessageGroupId; v != nil { + tfMap["message_group_id"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMachineParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 57742030ca5..fe96aa40275 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -330,7 +330,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below. * `redshift_data` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. * `sagemaker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. -* `sqs_queue` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below. +* `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below. * `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. #### target_parameters.batch_target Configuration Block @@ -527,7 +527,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `name` - (Optional) Name of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 256. * `value` - (Optional) Value of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 1024. 
-#### target_parameters.sqs_queue Configuration Block +#### target_parameters.sqs_queue_parameters Configuration Block * `message_deduplication_id` - (Optional) This parameter applies only to FIFO (first-in-first-out) queues. The token used for deduplication of sent messages. * `message_group_id` - (Optional) The FIFO message group ID to use as the target. From 4df4d06e2f1201a13722c42860d01acc6096eb5c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 15:03:38 -0400 Subject: [PATCH 25/65] r/aws_pipes_pipe: 'target_parameters.sagemaker_pipeline' -> 'target_parameters.sagemaker_pipeline_parameters'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 132 ++++++++++++++++++-- website/docs/r/pipes_pipe.html.markdown | 8 +- 3 files changed, 125 insertions(+), 17 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index cab94a789fc..65f1668e39f 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index f4452397e73..29ae23afb3a 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -31,7 +31,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -176,7 +176,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -210,7 +210,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -543,7 +543,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", 
"target_parameters.0.step_function_state_machine_parameters", }, @@ -601,7 +601,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -670,7 +670,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -696,7 +696,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -722,7 +722,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -764,7 +764,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "sagemaker_pipeline": { + "sagemaker_pipeline_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -782,7 +782,7 @@ func targetParametersSchema() *schema.Schema { }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "parameters": { + "pipeline_parameter": { Type: schema.TypeList, Optional: true, MaxItems: 200, @@ -820,7 +820,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.step_function_state_machine_parameters", }, Elem: &schema.Resource{ @@ -851,7 +851,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", "target_parameters.0.redshift_data", - "target_parameters.0.sagemaker_pipeline", + "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", }, Elem: &schema.Resource{ @@ -878,6 +878,10 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["sagemaker_pipeline_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.SageMakerPipelineParameters = expandPipeTargetSageMakerPipelineParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.SqsQueueParameters = expandPipeTargetSqsQueueParameters(v[0].(map[string]interface{})) } @@ -889,6 +893,64 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetSageMakerPipelineParameters(tfMap map[string]interface{}) *types.PipeTargetSageMakerPipelineParameters { + if tfMap == 
nil { + return nil + } + + apiObject := &types.PipeTargetSageMakerPipelineParameters{} + + if v, ok := tfMap["pipeline_parameter"].([]interface{}); ok && len(v) > 0 { + apiObject.PipelineParameterList = expandSageMakerPipelineParameters(v) + } + + return apiObject +} + +func expandSageMakerPipelineParameter(tfMap map[string]interface{}) *types.SageMakerPipelineParameter { + if tfMap == nil { + return nil + } + + apiObject := &types.SageMakerPipelineParameter{} + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPipelineParameter { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.SageMakerPipelineParameter + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandSageMakerPipelineParameter(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { if tfMap == nil { return nil @@ -930,6 +992,10 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.SageMakerPipelineParameters; v != nil { + tfMap["sagemaker_pipeline_parameters"] = []interface{}{flattenPipeTargetSageMakerPipelineParameters(v)} + } + if v := apiObject.SqsQueueParameters; v != nil { tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSqsQueueParameters(v)} } @@ -941,6 +1007,48 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetSageMakerPipelineParameters(apiObject *types.PipeTargetSageMakerPipelineParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.PipelineParameterList; v != nil { + tfMap["pipeline_parameter"] = flattenSageMakerPipelineParameters(v) + } + + return tfMap +} + +func flattenSageMakerPipelineParameter(apiObject types.SageMakerPipelineParameter) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelineParameter) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenSageMakerPipelineParameter(apiObject)) + } + + return tfList +} + func flattenPipeTargetSqsQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index fe96aa40275..91104cca0c3 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -329,7 +329,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below. 
 * `redshift_data` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below.
-* `sagemaker_pipeline` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below.
+* `sagemaker_pipeline_parameters` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below.
 * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below.
 * `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below.
@@ -518,11 +518,11 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https:
 * `statement_name` - (Optional) The name of the SQL statement. You can name the SQL statement when you create it to identify the query.
 * `with_event` - (Optional) Indicates whether to send an event back to EventBridge after the SQL statement runs.
 
-#### target_parameters.sagemaker_pipeline Configuration Block
+#### target_parameters.sagemaker_pipeline_parameters Configuration Block
 
-* `parameters` - (Optional) List of Parameter names and values for SageMaker Model Building Pipeline execution. Detailed below.
+* `pipeline_parameter` - (Optional) List of Parameter names and values for SageMaker Model Building Pipeline execution. Detailed below.
 
-##### target_parameters.sagemaker_pipeline.parameters Configuration Block
+##### target_parameters.sagemaker_pipeline_parameters.pipeline_parameter Configuration Block
 
 * `name` - (Optional) Name of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 256.
 * `value` - (Optional) Value of parameter to start execution of a SageMaker Model Building Pipeline. Maximum length of 1024.

From f24f40ae25c66e94e948451aae6c4255b7bf42b8 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Tue, 13 Jun 2023 15:13:41 -0400
Subject: [PATCH 26/65] r/aws_pipes_pipe: 'target_parameters.redshift_data' ->
 'target_parameters.redshift_data_parameters'.
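
Illustrative test sketch of the mapping this change adds (not part of the diff below; the test name is hypothetical, the unexported `expandPipeTargetRedshiftDataParameters` helper is the one introduced here, and `sqls` is built as a `*schema.Set` because that is how the schema delivers it):

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/stretchr/testify/assert"
)

// Scalar arguments map onto the corresponding SDK struct fields and the
// "sqls" set expands to []string.
func TestRedshiftDataParametersMapping(t *testing.T) {
	tfMap := map[string]interface{}{
		"database":       "dev",
		"db_user":        "admin",
		"sqls":           schema.NewSet(schema.HashString, []interface{}{"SELECT 1"}),
		"statement_name": "example",
		"with_event":     true,
	}

	apiObject := expandPipeTargetRedshiftDataParameters(tfMap)

	assert.Equal(t, "dev", aws.ToString(apiObject.Database))
	assert.Equal(t, "admin", aws.ToString(apiObject.DbUser))
	assert.Equal(t, []string{"SELECT 1"}, apiObject.Sqls)
	assert.Equal(t, "example", aws.ToString(apiObject.StatementName))
	assert.True(t, apiObject.WithEvent)
}
```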
--- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 108 +++++++++++++++++--- website/docs/r/pipes_pipe.html.markdown | 6 +- 3 files changed, 95 insertions(+), 21 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 65f1668e39f..0163a6ecfe2 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 29ae23afb3a..f89683be6b7 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -30,7 +30,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -175,7 +175,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -209,7 +209,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -542,7 +542,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -600,7 +600,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", 
"target_parameters.0.step_function_state_machine_parameters", @@ -669,7 +669,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -695,7 +695,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -710,7 +710,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "redshift_data": { + "redshift_data_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -733,7 +733,7 @@ func targetParametersSchema() *schema.Schema { Required: true, ValidateFunc: validation.StringLenBetween(1, 64), }, - "database_user": { + "db_user": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringLenBetween(1, 128), @@ -743,11 +743,6 @@ func targetParametersSchema() *schema.Schema { Optional: true, ValidateFunc: verify.ValidARN, }, - "statement_name": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 500), - }, "sqls": { Type: schema.TypeSet, Required: true, @@ -756,6 +751,11 @@ func targetParametersSchema() *schema.Schema { ValidateFunc: validation.StringLenBetween(1, 100000), }, }, + "statement_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 500), + }, "with_event": { Type: schema.TypeBool, Optional: true, @@ -776,7 +776,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -819,7 +819,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.step_function_state_machine_parameters", }, @@ -850,7 +850,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", "target_parameters.0.lambda_function", - "target_parameters.0.redshift_data", + "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", }, @@ -878,6 +878,10 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["redshift_data_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RedshiftDataParameters = expandPipeTargetRedshiftDataParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["sagemaker_pipeline_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { 
apiObject.SageMakerPipelineParameters = expandPipeTargetSageMakerPipelineParameters(v[0].(map[string]interface{})) } @@ -893,6 +897,40 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types.PipeTargetRedshiftDataParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetRedshiftDataParameters{} + + if v, ok := tfMap["database"].(string); ok { + apiObject.Database = aws.String(v) + } + + if v, ok := tfMap["db_user"].(string); ok { + apiObject.DbUser = aws.String(v) + } + + if v, ok := tfMap["secret_manager_arn"].(string); ok { + apiObject.SecretManagerArn = aws.String(v) + } + + if v, ok := tfMap["sqls"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Sqls = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["statement_name"].(string); ok { + apiObject.StatementName = aws.String(v) + } + + if v, ok := tfMap["with_event"].(bool); ok { + apiObject.WithEvent = v + } + + return apiObject +} + func expandPipeTargetSageMakerPipelineParameters(tfMap map[string]interface{}) *types.PipeTargetSageMakerPipelineParameters { if tfMap == nil { return nil @@ -992,6 +1030,10 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.RedshiftDataParameters; v != nil { + tfMap["redshift_data_parameters"] = []interface{}{flattenPipeTargetRedshiftDataParameters(v)} + } + if v := apiObject.SageMakerPipelineParameters; v != nil { tfMap["sagemaker_pipeline_parameters"] = []interface{}{flattenPipeTargetSageMakerPipelineParameters(v)} } @@ -1007,6 +1049,38 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetRedshiftDataParameters(apiObject *types.PipeTargetRedshiftDataParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "with_event": apiObject.WithEvent, + } + + if v := apiObject.Database; v != nil { + tfMap["database"] = aws.ToString(v) + } + + if v := apiObject.DbUser; v != nil { + tfMap["db_user"] = aws.ToString(v) + } + + if v := apiObject.SecretManagerArn; v != nil { + tfMap["secret_manager_arn"] = aws.ToString(v) + } + + if v := apiObject.Sqls; v != nil { + tfMap["sqls"] = v + } + + if v := apiObject.StatementName; v != nil { + tfMap["statement_name"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeTargetSageMakerPipelineParameters(apiObject *types.PipeTargetSageMakerPipelineParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 91104cca0c3..b03ce3484d2 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -328,7 +328,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters. * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. * `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below. -* `redshift_data` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. 
+* `redshift_data_parameters` - (Optional) These are custom parameters to be used when the target is an Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. * `sagemaker_pipeline_parameters` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a target. Detailed below. * `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. @@ -509,10 +509,10 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `invocation_type` - (Optional) Specify whether to invoke the function synchronously or asynchronously. Valid Values: REQUEST_RESPONSE, FIRE_AND_FORGET. -#### target_parameters.redshift_data Configuration Block +#### target_parameters.redshift_data_parameters Configuration Block * `database` - (Required) The name of the database. Required when authenticating using temporary credentials. -* `database_user` - (Optional) The database user name. Required when authenticating using temporary credentials. +* `db_user` - (Optional) The database user name. Required when authenticating using temporary credentials. * `secret_manager_arn` - (Optional) The name or ARN of the secret that enables access to the database. Required when authenticating using Secrets Manager. * `sqls` - (Optional) List of SQL statements text to run, each of maximum length of 100,000. * `statement_name` - (Optional) The name of the SQL statement. You can name the SQL statement when you create it to identify the query. From c0865b213d923cd0068b66be22a8d7f7cce10119 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 15:19:23 -0400 Subject: [PATCH 27/65] r/aws_pipes_pipe: 'target_parameters.lambda_function' -> 'target_parameters.lambda_function_parameters'.
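Alongside the rename, the expand/flatten mapping for Lambda targets is wired up. For reference, a round-trip sketch against the unexported helpers introduced below (the test name and values are illustrative, not part of this change):

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
	"github.com/stretchr/testify/assert"
)

// Sketch: expand the Terraform attribute map into the SDK type, then flatten it back.
func TestLambdaFunctionParametersRoundTrip(t *testing.T) {
	tfMap := map[string]interface{}{
		"invocation_type": "REQUEST_RESPONSE",
	}

	// expand maps the configured string onto the SDK enum type.
	apiObject := expandPipeTargetLambdaFunctionParameters(tfMap)
	assert.Equal(t, types.PipeTargetInvocationType("REQUEST_RESPONSE"), apiObject.InvocationType)

	// flatten returns the enum value under the renamed attribute key.
	flattened := flattenPipeTargetLambdaFunctionParameters(apiObject)
	assert.Equal(t, types.PipeTargetInvocationType("REQUEST_RESPONSE"), flattened["invocation_type"])
}
```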
--- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 70 ++++++++++++++++----- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 56 insertions(+), 20 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 0163a6ecfe2..22ddcf385ce 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index f89683be6b7..c911b10a2b2 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -29,7 +29,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -174,7 +174,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -208,7 +208,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -541,7 +541,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -599,7 +599,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", 
"target_parameters.0.sqs_queue_parameters", @@ -668,7 +668,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -684,7 +684,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "lambda_function": { + "lambda_function_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -721,7 +721,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -775,7 +775,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sqs_queue_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -818,7 +818,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.step_function_state_machine_parameters", @@ -849,7 +849,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream", - "target_parameters.0.lambda_function", + "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -878,6 +878,10 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["lambda_function_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.LambdaFunctionParameters = expandPipeTargetLambdaFunctionParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["redshift_data_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.RedshiftDataParameters = expandPipeTargetRedshiftDataParameters(v[0].(map[string]interface{})) } @@ -897,6 +901,20 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetLambdaFunctionParameters(tfMap map[string]interface{}) *types.PipeTargetLambdaFunctionParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetLambdaFunctionParameters{} + + if v, ok := tfMap["invocation_type"].(string); ok && v != "" { + apiObject.InvocationType = types.PipeTargetInvocationType(v) + } + + return apiObject +} + func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types.PipeTargetRedshiftDataParameters { if 
tfMap == nil { return nil @@ -904,15 +922,15 @@ func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types apiObject := &types.PipeTargetRedshiftDataParameters{} - if v, ok := tfMap["database"].(string); ok { + if v, ok := tfMap["database"].(string); ok && v != "" { apiObject.Database = aws.String(v) } - if v, ok := tfMap["db_user"].(string); ok { + if v, ok := tfMap["db_user"].(string); ok && v != "" { apiObject.DbUser = aws.String(v) } - if v, ok := tfMap["secret_manager_arn"].(string); ok { + if v, ok := tfMap["secret_manager_arn"].(string); ok && v != "" { apiObject.SecretManagerArn = aws.String(v) } @@ -920,7 +938,7 @@ func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types apiObject.Sqls = flex.ExpandStringValueSet(v) } - if v, ok := tfMap["statement_name"].(string); ok { + if v, ok := tfMap["statement_name"].(string); ok && v != "" { apiObject.StatementName = aws.String(v) } @@ -996,11 +1014,11 @@ func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.Pip apiObject := &types.PipeTargetSqsQueueParameters{} - if v, ok := tfMap["message_deduplication_id"].(string); ok { + if v, ok := tfMap["message_deduplication_id"].(string); ok && v != "" { apiObject.MessageDeduplicationId = aws.String(v) } - if v, ok := tfMap["message_group_id"].(string); ok { + if v, ok := tfMap["message_group_id"].(string); ok && v != "" { apiObject.MessageGroupId = aws.String(v) } @@ -1030,6 +1048,10 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.LambdaFunctionParameters; v != nil { + tfMap["lambda_function_parameters"] = []interface{}{flattenPipeTargetLambdaFunctionParameters(v)} + } + if v := apiObject.RedshiftDataParameters; v != nil { tfMap["redshift_data_parameters"] = []interface{}{flattenPipeTargetRedshiftDataParameters(v)} } @@ -1049,6 +1071,20 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetLambdaFunctionParameters(apiObject *types.PipeTargetLambdaFunctionParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v + } + + return tfMap +} + func flattenPipeTargetRedshiftDataParameters(apiObject *types.PipeTargetRedshiftDataParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index b03ce3484d2..251e7c26281 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -327,7 +327,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `http_parameters` - (Optional) These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations. Detailed below. * `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters. * `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. -* `lambda_function` - (Optional) The parameters for using a Lambda function as a target. Detailed below. +* `lambda_function_parameters` - (Optional) The parameters for using a Lambda function as a target. Detailed below. 
* `redshift_data_parameters` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. * `sagemaker_pipeline_parameters` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a target. @@ -505,7 +505,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `partition_key` - (Required) Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. -#### target_parameters.lambda_function Configuration Block +#### target_parameters.lambda_function_parameters Configuration Block * `invocation_type` - (Optional) Specify whether to invoke the function synchronously or asynchronously. Valid Values: REQUEST_RESPONSE, FIRE_AND_FORGET. From 92d6154b3a789a24ecd16717f8fb5936a9bada2e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 15:23:41 -0400 Subject: [PATCH 28/65] r/aws_pipes_pipe: 'target_parameters.kinesis_stream' -> 'target_parameters.kinesis_stream_parameters'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 58 +++++++++++++++++---- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 50 insertions(+), 14 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 22ddcf385ce..e792838e692 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index c911b10a2b2..c169a3cdf42 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -28,7 +28,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters",
"target_parameters.0.sagemaker_pipeline_parameters", @@ -173,7 +173,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -207,7 +207,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.cloudwatch_logs", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -540,7 +540,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -598,7 +598,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -658,7 +658,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, ValidateFunc: validation.StringLenBetween(0, 8192), }, - "kinesis_stream": { + "kinesis_stream_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -694,7 +694,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -720,7 +720,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.sagemaker_pipeline_parameters", "target_parameters.0.sqs_queue_parameters", @@ -774,7 +774,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sqs_queue_parameters", @@ -817,7 +817,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", 
"target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -848,7 +848,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.ecs_task", "target_parameters.0.event_bridge_event_bus", "target_parameters.0.http_parameters", - "target_parameters.0.kinesis_stream", + "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", "target_parameters.0.sagemaker_pipeline_parameters", @@ -878,6 +878,10 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.KinesisStreamParameters = expandPipeTargetKinesisStreamParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["lambda_function_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.LambdaFunctionParameters = expandPipeTargetLambdaFunctionParameters(v[0].(map[string]interface{})) } @@ -901,6 +905,20 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeTargetKinesisStreamParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetKinesisStreamParameters{} + + if v, ok := tfMap["partition_key"].(string); ok && v != "" { + apiObject.PartitionKey = aws.String(v) + } + + return apiObject +} + func expandPipeTargetLambdaFunctionParameters(tfMap map[string]interface{}) *types.PipeTargetLambdaFunctionParameters { if tfMap == nil { return nil @@ -1048,6 +1066,10 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.KinesisStreamParameters; v != nil { + tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeTargetKinesisStreamParameters(v)} + } + if v := apiObject.LambdaFunctionParameters; v != nil { tfMap["lambda_function_parameters"] = []interface{}{flattenPipeTargetLambdaFunctionParameters(v)} } @@ -1071,6 +1093,20 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetKinesisStreamParameters(apiObject *types.PipeTargetKinesisStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.PartitionKey; v != nil { + tfMap["partition_key"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeTargetLambdaFunctionParameters(apiObject *types.PipeTargetLambdaFunctionParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 251e7c26281..c67c5408142 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -326,7 +326,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `event_bridge_event_bus` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. * `http_parameters` - (Optional) These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations. Detailed below. * `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters. 
-* `kinesis_stream` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. +* `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a target. Detailed below. * `lambda_function_parameters` - (Optional) The parameters for using a Lambda function as a target. Detailed below. * `redshift_data_parameters` - (Optional) These are custom parameters to be used when the target is a Amazon Redshift cluster to invoke the Amazon Redshift Data API BatchExecuteStatement. Detailed below. * `sagemaker_pipeline_parameters` - (Optional) The parameters for using a SageMaker pipeline as a target. Detailed below. * `sqs_queue_parameters` - (Optional) The parameters for using a Amazon SQS stream as a target. @@ -501,7 +501,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `key` - (Optional) The name of the query string. Maximum length of 512 characters. * `value` - (Optional) The header query string. Maximum length of 512 characters. -#### target_parameters.kinesis_stream Configuration Block +#### target_parameters.kinesis_stream_parameters Configuration Block * `partition_key` - (Required) Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. From 121c10794f066fa4ce01a579e7f5b9a1875fdf8e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 16:33:05 -0400 Subject: [PATCH 29/65] r/aws_pipes_pipe: Modify 'target_parameters.http_parameters'.
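The nested `header`/`query_string` key-value blocks become plain maps, which line up directly with the SDK's `map[string]string` fields. A sketch of the resulting mapping (the test name and values are illustrative only):

```go
package pipes

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Sketch: the map-based attributes expand straight into the SDK's
// map[string]string and []string fields, with no nested blocks.
func TestHTTPParametersExpandMaps(t *testing.T) {
	tfMap := map[string]interface{}{
		"header_parameters":       map[string]interface{}{"X-Example": "value"},
		"path_parameter_values":   []interface{}{"wildcard-value"},
		"query_string_parameters": map[string]interface{}{"example": "1"},
	}

	apiObject := expandPipeTargetHttpParameters(tfMap)

	assert.Equal(t, map[string]string{"X-Example": "value"}, apiObject.HeaderParameters)
	assert.Equal(t, []string{"wildcard-value"}, apiObject.PathParameterValues)
	assert.Equal(t, map[string]string{"example": "1"}, apiObject.QueryStringParameters)
}
```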
--- internal/service/pipes/target_parameters.go | 100 +++++++++++++------- website/docs/r/pipes_pipe.html.markdown | 16 +--- 2 files changed, 70 insertions(+), 46 deletions(-) diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index c169a3cdf42..6ff07039081 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -607,48 +607,22 @@ func targetParametersSchema() *schema.Schema { }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "header": { - Type: schema.TypeList, + "header_parameters": { + Type: schema.TypeMap, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, }, - "path_parameters": { + "path_parameter_values": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, }, }, - "query_string": { - Type: schema.TypeList, + "query_string_parameters": { + Type: schema.TypeMap, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(0, 512), - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, }, }, }, @@ -878,6 +852,14 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.HttpParameters = expandPipeTargetHttpParameters(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["input_template"].(string); ok && v != "" { + apiObject.InputTemplate = aws.String(v) + } + if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.KinesisStreamParameters = expandPipeTargetKinesisStreamParameters(v[0].(map[string]interface{})) } @@ -905,6 +887,28 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetHttpParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetHttpParameters{} + + if v, ok := tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.HeaderParameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { + apiObject.PathParameterValues = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) + } + + return apiObject +} + func expandPipeTargetKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeTargetKinesisStreamParameters { if tfMap == nil { return nil @@ -1066,6 +1070,14 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.HttpParameters; v != nil { + tfMap["http_parameters"] = []interface{}{flattenPipeTargetHttpParameters(v)} + } + + if v := apiObject.InputTemplate; v != nil { + tfMap["input_template"] = aws.ToString(v) + } + if v := 
apiObject.KinesisStreamParameters; v != nil { tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeTargetKinesisStreamParameters(v)} } @@ -1093,6 +1105,28 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetHttpParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.HeaderParameters; v != nil { + tfMap["header_parameters"] = v + } + + if v := apiObject.PathParameterValues; v != nil { + tfMap["path_parameter_values"] = v + } + + if v := apiObject.QueryStringParameters; v != nil { + tfMap["query_string_parameters"] = v + } + + return tfMap +} + func flattenPipeTargetKinesisStreamParameters(apiObject *types.PipeTargetKinesisStreamParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index c67c5408142..1de682d8fbb 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -487,19 +487,9 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: #### target_parameters.http_parameters Configuration Block -* `header` - (Optional) The headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below. -* `path_parameters` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*"). -* `query_string` - (Optional) The query strings that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. Detailed below. - -##### target_parameters.http_parameters.header Configuration Block - -* `key` - (Optional) The name of the header. Maximum length of 512 characters. -* `value` - (Optional) The header value. Maximum length of 512 characters. - -##### target_parameters.http_parameters.query_string Configuration Block - -* `key` - (Optional) The name of the query string. Maximum length of 512 characters. -* `value` - (Optional) The header query string. Maximum length of 512 characters. +* `header_parameters` - (Optional) Key-value mapping of the headers that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. +* `path_parameter_values` - (Optional) The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*"). +* `query_string_parameters` - (Optional) Key-value mapping of the query strings that need to be sent as part of request invoking the API Gateway REST API or EventBridge ApiDestination. #### target_parameters.kinesis_stream_parameters Configuration Block * `partition_key` - (Required) Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard. Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. From 8080fa91c1745d01fdc31635a14181312712418f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 16:42:19 -0400 Subject: [PATCH 30/65] r/aws_pipes_pipe: 'target_parameters.event_bridge_event_bus' -> 'target_parameters.eventbridge_event_bus_parameters'.
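Alongside the rename, the expand/flatten mapping for the event bus target is wired up. A sketch of the expansion (the test name and values are illustrative; "abcde.veo" follows the endpoint ID example in the resource docs):

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/stretchr/testify/assert"
)

// Sketch: expand the renamed block into the SDK type; "resources" is a set
// of ARNs, so it is passed as a *schema.Set and expanded to []string.
func TestEventBridgeEventBusParametersExpand(t *testing.T) {
	tfMap := map[string]interface{}{
		"detail_type": "ExampleDetailType",
		"endpoint_id": "abcde.veo",
		"resources":   schema.NewSet(schema.HashString, []interface{}{"arn:aws:example"}),
		"source":      "example.source",
	}

	apiObject := expandPipeTargetEventBridgeEventBusParameters(tfMap)

	assert.Equal(t, "ExampleDetailType", aws.ToString(apiObject.DetailType))
	assert.Equal(t, "abcde.veo", aws.ToString(apiObject.EndpointId))
	assert.Equal(t, []string{"arn:aws:example"}, apiObject.Resources)
	assert.Equal(t, "example.source", aws.ToString(apiObject.Source))
}
```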
--- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 90 ++++++++++++++++++--- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 82 insertions(+), 14 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index e792838e692..09a2a275388 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `event_bridge_event_bus`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 6ff07039081..a8aed4cbd10 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -26,7 +26,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -171,7 +171,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_target", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -205,7 +205,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -531,7 +531,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "event_bridge_event_bus": { + "eventbridge_event_bus_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -597,7 +597,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", @@ -640,7 +640,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - 
"target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.lambda_function_parameters", "target_parameters.0.redshift_data_parameters", @@ -666,7 +666,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.redshift_data_parameters", @@ -692,7 +692,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -746,7 +746,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -789,7 +789,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -820,7 +820,7 @@ func targetParametersSchema() *schema.Schema { "target_parameters.0.batch_target", "target_parameters.0.cloudwatch_logs", "target_parameters.0.ecs_task", - "target_parameters.0.event_bridge_event_bus", + "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -852,6 +852,10 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["eventbridge_event_bus_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EventBridgeEventBusParameters = expandPipeTargetEventBridgeEventBusParameters(v[0].(map[string]interface{})) + } + if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.HttpParameters = expandPipeTargetHttpParameters(v[0].(map[string]interface{})) } @@ -887,6 +891,36 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) *types.PipeTargetEventBridgeEventBusParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetEventBridgeEventBusParameters{} + + if v, ok := tfMap["detail_type"].(string); ok && v != "" { + apiObject.DetailType = aws.String(v) + } + + if v, ok := tfMap["endpoint_id"].(string); ok && v != "" { + apiObject.EndpointId = aws.String(v) + } + + if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Resources = flex.ExpandStringValueSet(v) + } + + if 
v, ok := tfMap["source"].(string); ok && v != "" { + apiObject.Source = aws.String(v) + } + + if v, ok := tfMap["time"].(string); ok && v != "" { + apiObject.Time = aws.String(v) + } + + return apiObject +} + func expandPipeTargetHttpParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { if tfMap == nil { return nil @@ -1070,6 +1104,10 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.EventBridgeEventBusParameters; v != nil { + tfMap["eventbridge_event_bus_parameters"] = []interface{}{flattenPipeTargetEventBridgeEventBusParameters(v)} + } + if v := apiObject.HttpParameters; v != nil { tfMap["http_parameters"] = []interface{}{flattenPipeTargetHttpParameters(v)} } @@ -1105,6 +1143,36 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetEventBridgeEventBusParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.DetailType; v != nil { + tfMap["detail_type"] = aws.ToString(v) + } + + if v := apiObject.EndpointId; v != nil { + tfMap["endpoint_id"] = aws.ToString(v) + } + + if v := apiObject.Resources; v != nil { + tfMap["resources"] = v + } + + if v := apiObject.Source; v != nil { + tfMap["source"] = aws.ToString(v) + } + + if v := apiObject.Time; v != nil { + tfMap["time"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeTargetHttpParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 1de682d8fbb..a089f96bb68 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -323,7 +323,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `batch_target` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below. * `cloudwatch_logs` - (Optional) The parameters for using an CloudWatch Logs log stream as a target. Detailed below. * `ecs_task` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below. -* `event_bridge_event_bus` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. +* `eventbridge_event_bus_parameters` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. * `http_parameters` - (Optional) These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations. Detailed below. * `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters. * `kinesis_stream_parameters` - (Optional) The parameters for using a Kinesis stream as a source. Detailed below. @@ -477,7 +477,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `key` - (Optional) A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources. Maximum length of 128. * `value` - (Optional) The value for the specified tag key. Maximum length of 256. 
-#### target_parameters.event_bridge_event_bus Configuration Block +#### target_parameters.eventbridge_event_bus_parameters Configuration Block * `detail_type` - (Optional) A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail. * `endpoint_id` - (Optional) The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo. From 0a20bb72599d8a3898d9befd79f48f6f8d05b6d0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 16:47:41 -0400 Subject: [PATCH 31/65] r/aws_pipes_pipe: 'target_parameters.cloudwatch_logs' -> 'target_parameters.cloudwatch_logs_parameters'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 70 +++++++++++++++++---- website/docs/r/pipes_pipe.html.markdown | 4 +- 3 files changed, 62 insertions(+), 14 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 09a2a275388..8922721db4f 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs_parameters`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index a8aed4cbd10..0b4ed5949d7 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -24,7 +24,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -164,7 +164,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "cloudwatch_logs": { + "cloudwatch_logs_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -204,7 +204,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -537,7 +537,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -595,7 +595,7 @@ func targetParametersSchema() 
*schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -638,7 +638,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -664,7 +664,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -690,7 +690,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -744,7 +744,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -787,7 +787,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -818,7 +818,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_target", - "target_parameters.0.cloudwatch_logs", + "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -852,6 +852,12 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP // TODO + if v, ok := tfMap["cloudwatch_logs_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.CloudWatchLogsParameters = expandPipeTargetCloudWatchLogsParameters(v[0].(map[string]interface{})) + } + + // TODO + if v, ok := tfMap["eventbridge_event_bus_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.EventBridgeEventBusParameters = expandPipeTargetEventBridgeEventBusParameters(v[0].(map[string]interface{})) } @@ -891,6 +897,24 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetCloudWatchLogsParameters(tfMap map[string]interface{}) *types.PipeTargetCloudWatchLogsParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetCloudWatchLogsParameters{} + + if v, ok := tfMap["log_stream_name"].(string); ok && v != "" { + apiObject.LogStreamName = aws.String(v) + } + + if v, ok := tfMap["timestamp"].(string); ok 
&& v != "" { + apiObject.Timestamp = aws.String(v) + } + + return apiObject +} + func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) *types.PipeTargetEventBridgeEventBusParameters { if tfMap == nil { return nil @@ -1104,6 +1128,12 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri // TODO + if v := apiObject.CloudWatchLogsParameters; v != nil { + tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetCloudWatchLogsParameters(v)} + } + + // TODO + if v := apiObject.EventBridgeEventBusParameters; v != nil { tfMap["eventbridge_event_bus_parameters"] = []interface{}{flattenPipeTargetEventBridgeEventBusParameters(v)} } @@ -1143,6 +1173,24 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetCloudWatchLogsParameters(apiObject *types.PipeTargetCloudWatchLogsParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.LogStreamName; v != nil { + tfMap["log_stream_name"] = aws.ToString(v) + } + + if v := apiObject.Timestamp; v != nil { + tfMap["timestamp"] = aws.ToString(v) + } + + return tfMap +} + func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetEventBridgeEventBusParameters) map[string]interface{} { if apiObject == nil { return nil diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index a089f96bb68..696e9cf66af 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -321,7 +321,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: You can find out more about EventBridge Pipes Targets in the [User Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-target.html). * `batch_target` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below. -* `cloudwatch_logs` - (Optional) The parameters for using an CloudWatch Logs log stream as a target. Detailed below. +* `cloudwatch_logs_parameters` - (Optional) The parameters for using an CloudWatch Logs log stream as a target. Detailed below. * `ecs_task` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below. * `eventbridge_event_bus_parameters` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. * `http_parameters` - (Optional) These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations. Detailed below. @@ -378,7 +378,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `attempts` - (Optional) The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. Maximum value of 10. -#### target_parameters.cloudwatch_logs Configuration Block +#### target_parameters.cloudwatch_logs_parameters Configuration Block * `log_stream_name` - (Optional) The name of the log stream. * `timestamp` - (Optional) The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. This is the JSON path to the field in the event e.g. 
$.detail.timestamp From a93645edfff29caf5de608cd621f294d4a161bbe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 13 Jun 2023 17:23:11 -0400 Subject: [PATCH 32/65] r/aws_pipes_pipe: 'target_parameters.batch_target' -> 'target_parameters.batch_job_parameters'. --- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 449 ++++++++++++++++++-- website/docs/r/pipes_pipe.html.markdown | 23 +- 3 files changed, 432 insertions(+), 42 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index 8922721db4f..e24dbcb8d10 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_target`, `cloudwatch_logs_parameters`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 0b4ed5949d7..540c30430fc 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -19,7 +19,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "batch_target": { + "batch_job_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -84,7 +84,7 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeString, Optional: true, }, - "resource_requirements": { + "resource_requirement": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ @@ -132,20 +132,9 @@ func targetParametersSchema() *schema.Schema { ValidateFunc: validation.StringLenBetween(1, 128), }, "parameters": { - Type: schema.TypeList, + Type: schema.TypeMap, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, + Elem: &schema.Schema{Type: schema.TypeString}, }, "retry_strategy": { Type: schema.TypeList, @@ -169,7 +158,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -203,7 +192,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", @@ -536,7 +525,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, 
ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.http_parameters", @@ -594,7 +583,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -637,7 +626,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -663,7 +652,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -689,7 +678,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -743,7 +732,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -786,7 +775,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -817,7 +806,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, MaxItems: 1, ConflictsWith: []string{ - "target_parameters.0.batch_target", + "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", "target_parameters.0.ecs_task", "target_parameters.0.eventbridge_event_bus_parameters", @@ -850,7 +839,9 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP apiObject := &types.PipeTargetParameters{} - // TODO + if v, ok := tfMap["batch_job_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.BatchJobParameters = expandPipeTargetBatchJobParameters(v[0].(map[string]interface{})) + } if v, ok := tfMap["cloudwatch_logs_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.CloudWatchLogsParameters = expandPipeTargetCloudWatchLogsParameters(v[0].(map[string]interface{})) @@ -897,6 +888,230 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP return apiObject } +func expandPipeTargetBatchJobParameters(tfMap map[string]interface{}) *types.PipeTargetBatchJobParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetBatchJobParameters{} + + if v, ok := 
tfMap["array_properties"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ArrayProperties = expandBatchArrayProperties(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["container_overrides"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.ContainerOverrides = expandBatchContainerOverrides(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["depends_on"].([]interface{}); ok && len(v) > 0 { + apiObject.DependsOn = expandBatchJobDependencies(v) + } + + if v, ok := tfMap["job_definition"].(string); ok && v != "" { + apiObject.JobDefinition = aws.String(v) + } + + if v, ok := tfMap["job_name"].(string); ok && v != "" { + apiObject.JobName = aws.String(v) + } + + if v, ok := tfMap["parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.Parameters = flex.ExpandStringValueMap(v) + } + + if v, ok := tfMap["retry_strategy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.RetryStrategy = expandBatchRetryStrategy(v[0].(map[string]interface{})) + } + + return apiObject +} + +func expandBatchArrayProperties(tfMap map[string]interface{}) *types.BatchArrayProperties { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchArrayProperties{} + + if v, ok := tfMap["size"].(int); ok && v != 0 { + apiObject.Size = int32(v) + } + + return apiObject +} + +func expandBatchContainerOverrides(tfMap map[string]interface{}) *types.BatchContainerOverrides { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchContainerOverrides{} + + if v, ok := tfMap["command"].([]interface{}); ok && len(v) > 0 { + apiObject.Command = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["environment"].([]interface{}); ok && len(v) > 0 { + apiObject.Environment = expandBatchEnvironmentVariables(v) + } + + if v, ok := tfMap["instance_type"].(string); ok && v != "" { + apiObject.InstanceType = aws.String(v) + } + + if v, ok := tfMap["resource_requirement"].([]interface{}); ok && len(v) > 0 { + apiObject.ResourceRequirements = expandBatchResourceRequirements(v) + } + + return apiObject +} + +func expandBatchEnvironmentVariable(tfMap map[string]interface{}) *types.BatchEnvironmentVariable { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchEnvironmentVariable{} + + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandBatchEnvironmentVariables(tfList []interface{}) []types.BatchEnvironmentVariable { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.BatchEnvironmentVariable + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchEnvironmentVariable(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchResourceRequirement(tfMap map[string]interface{}) *types.BatchResourceRequirement { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchResourceRequirement{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.BatchResourceRequirementType(v) + } + + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) + } + + return apiObject +} + +func expandBatchResourceRequirements(tfList []interface{}) []types.BatchResourceRequirement { + if len(tfList) == 0 { + return nil + } + + var apiObjects 
[]types.BatchResourceRequirement + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchResourceRequirement(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchJobDependency(tfMap map[string]interface{}) *types.BatchJobDependency { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchJobDependency{} + + if v, ok := tfMap["job_id"].(string); ok && v != "" { + apiObject.JobId = aws.String(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.BatchJobDependencyType(v) + } + + return apiObject +} + +func expandBatchJobDependencies(tfList []interface{}) []types.BatchJobDependency { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.BatchJobDependency + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandBatchJobDependency(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandBatchRetryStrategy(tfMap map[string]interface{}) *types.BatchRetryStrategy { + if tfMap == nil { + return nil + } + + apiObject := &types.BatchRetryStrategy{} + + if v, ok := tfMap["attempts"].(int); ok && v != 0 { + apiObject.Attempts = int32(v) + } + + return apiObject +} + func expandPipeTargetCloudWatchLogsParameters(tfMap map[string]interface{}) *types.PipeTargetCloudWatchLogsParameters { if tfMap == nil { return nil @@ -1126,7 +1341,9 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri tfMap := map[string]interface{}{} - // TODO + if v := apiObject.BatchJobParameters; v != nil { + tfMap["batch_job_parameters"] = []interface{}{flattenPipeTargetBatchJobParameters(v)} + } if v := apiObject.CloudWatchLogsParameters; v != nil { tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetCloudWatchLogsParameters(v)} @@ -1173,6 +1390,182 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri return tfMap } +func flattenPipeTargetBatchJobParameters(apiObject *types.PipeTargetBatchJobParameters) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.ArrayProperties; v != nil { + tfMap["array_properties"] = []interface{}{flattenBatchArrayProperties(v)} + } + + if v := apiObject.ContainerOverrides; v != nil { + tfMap["container_overrides"] = []interface{}{flattenBatchContainerOverrides(v)} + } + + if v := apiObject.DependsOn; v != nil { + tfMap["depends_on"] = flattenBatchJobDependencies(v) + } + + if v := apiObject.JobDefinition; v != nil { + tfMap["job_definition"] = aws.ToString(v) + } + + if v := apiObject.JobName; v != nil { + tfMap["job_name"] = aws.ToString(v) + } + + if v := apiObject.Parameters; v != nil { + tfMap["parameters"] = v + } + + if v := apiObject.RetryStrategy; v != nil { + tfMap["retry_strategy"] = []interface{}{flattenBatchRetryStrategy(v)} + } + + return tfMap +} + +func flattenBatchArrayProperties(apiObject *types.BatchArrayProperties) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Size; v != 0 { + tfMap["size"] = int(v) + } + + return tfMap +} + +func flattenBatchContainerOverrides(apiObject *types.BatchContainerOverrides) map[string]interface{} { + if apiObject == nil { + return nil 
+ } + + tfMap := map[string]interface{}{} + + if v := apiObject.Command; v != nil { + tfMap["command"] = v + } + + if v := apiObject.Environment; v != nil { + tfMap["environment"] = flattenBatchEnvironmentVariables(v) + } + + if v := apiObject.InstanceType; v != nil { + tfMap["instance_type"] = aws.ToString(v) + } + + if v := apiObject.ResourceRequirements; v != nil { + tfMap["resource_requirement"] = flattenBatchResourceRequirements(v) + } + + return tfMap +} + +func flattenBatchEnvironmentVariable(apiObject types.BatchEnvironmentVariable) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenBatchEnvironmentVariables(apiObjects []types.BatchEnvironmentVariable) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchEnvironmentVariable(apiObject)) + } + + return tfList +} + +func flattenBatchResourceRequirement(apiObject types.BatchResourceRequirement) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) + } + + return tfMap +} + +func flattenBatchResourceRequirements(apiObjects []types.BatchResourceRequirement) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchResourceRequirement(apiObject)) + } + + return tfList +} + +func flattenBatchJobDependency(apiObject types.BatchJobDependency) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.JobId; v != nil { + tfMap["job_id"] = aws.ToString(v) + } + + if v := apiObject.Type; v != "" { + tfMap["type"] = v + } + + return tfMap +} + +func flattenBatchJobDependencies(apiObjects []types.BatchJobDependency) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchJobDependency(apiObject)) + } + + return tfList +} + +func flattenBatchRetryStrategy(apiObject *types.BatchRetryStrategy) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.Attempts; v != 0 { + tfMap["attempts"] = int(v) + } + + return tfMap +} + func flattenPipeTargetCloudWatchLogsParameters(apiObject *types.PipeTargetCloudWatchLogsParameters) map[string]interface{} { if apiObject == nil { return nil @@ -1377,6 +1770,7 @@ func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMac return tfMap } +/* func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { if len(config) == 0 { return nil @@ -2569,3 +2963,4 @@ func flattenTargetStepFunctionStateMachineParameters(parameters *types.PipeTarge result := []map[string]interface{}{config} return result } +*/ diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index 696e9cf66af..f6548cd0b49 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -320,7 +320,7 @@ You can find out more about EventBridge Pipes Sources in the [User Guide](https: You can find out more about EventBridge Pipes Targets in the [User
Guide](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-target.html). -* `batch_target` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below. +* `batch_job_parameters` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below. * `cloudwatch_logs_parameters` - (Optional) The parameters for using a CloudWatch Logs log stream as a target. Detailed below. * `ecs_task` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below. * `eventbridge_event_bus_parameters` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. @@ -333,7 +333,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `sqs_queue_parameters` - (Optional) The parameters for using an Amazon SQS queue as a target. Detailed below. * `step_function_state_machine_parameters` - (Optional) The parameters for using a Step Functions state machine as a target. Detailed below. -#### target_parameters.batch_target Configuration Block +#### target_parameters.batch_job_parameters Configuration Block * `array_properties` - (Optional) The array properties for the submitted job, such as the size of the array. The array size can be between 2 and 10,000. If you specify array properties for a job, it becomes an array job. This parameter is used only if the target is an AWS Batch job. Detailed below. * `container_overrides` - (Optional) The overrides that are sent to a container. Detailed below. @@ -343,38 +343,33 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `parameters` - (Optional) Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition. * `retry_strategy` - (Optional) The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition. Detailed below. -##### target_parameters.batch_target.array_properties Configuration Block +##### target_parameters.batch_job_parameters.array_properties Configuration Block * `size` - (Optional) The size of the array, if this is an array batch job. Minimum value of 2. Maximum value of 10,000. -##### target_parameters.batch_target.container_overrides Configuration Block +##### target_parameters.batch_job_parameters.container_overrides Configuration Block * `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition. * `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. Environment variables cannot start with "AWS_BATCH". This naming convention is reserved for variables that AWS Batch sets. Detailed below. * `instance_type` - (Optional) The instance type to use for a multi-node parallel job. This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided. -* `resource_requirements` - (Optional) The type and amount of resources to assign to a container. This overrides the settings in the job definition.
The supported resources include GPU, MEMORY, and VCPU. Detailed below. +* `resource_requirement` - (Optional) The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU. Detailed below. -###### target_parameters.batch_target.container_overrides.environment Configuration Block +###### target_parameters.batch_job_parameters.container_overrides.environment Configuration Block * `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable. * `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable. -###### target_parameters.batch_target.container_overrides.resource_requirements Configuration Block +###### target_parameters.batch_job_parameters.container_overrides.resource_requirement Configuration Block * `type` - (Optional) The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU. * `value` - (Optional) The quantity of the specified resource to reserve for the container. [The values vary based on the type specified](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchResourceRequirement.html). -##### target_parameters.batch_target.depends_on Configuration Block +##### target_parameters.batch_job_parameters.depends_on Configuration Block * `job_id` - (Optional) The job ID of the AWS Batch job that's associated with this dependency. * `type` - (Optional) The type of the job dependency. Valid Values: N_TO_N, SEQUENTIAL. -##### target_parameters.batch_target.parameters Configuration Block - -* `key` - (Optional) The name of the parameter. -* `value` - (Optional) The value of the parameter. - -##### target_parameters.batch_target.retry_strategy Configuration Block +##### target_parameters.batch_job_parameters.retry_strategy Configuration Block * `attempts` - (Optional) The number of times to move a job to the RUNNABLE status. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value. Maximum value of 10. From d77d3c5ad718ae030600f12c207cc6f7991bc452 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 12:36:19 -0400 Subject: [PATCH 33/65] r/aws_pipes_pipe: 'target_parameters.ecs_task' -> 'target_parameters.ecs_task_parameters'.
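As a minimal sketch of the mapping this rename touches (not part of the change itself; the test name and literal values below are invented for illustration), the renamed expander is expected to turn the Terraform-side `tags` map into the SDK's `[]types.Tag` and to wrap `task_count` in a pointer:

```go
package pipes

import (
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
	"github.com/stretchr/testify/assert"
)

func TestExpandPipeTargetEcsTaskParameters_sketch(t *testing.T) {
	// Config shaped the way the schema decodes an ecs_task_parameters block.
	tfMap := map[string]interface{}{
		"task_definition_arn": "arn:aws:ecs:us-east-1:123456789012:task-definition/example:1",
		"task_count":          1,
		"tags":                map[string]interface{}{"Name": "example"},
	}

	apiObject := expandPipeTargetEcsTaskParameters(tfMap)

	// task_count is carried as *int32 on the SDK struct.
	assert.Equal(t, int32(1), aws.ToInt32(apiObject.TaskCount))
	// The tags map becomes a slice of key/value Tag structs; using a single
	// tag keeps the comparison independent of map iteration order.
	assert.Equal(t, []types.Tag{{Key: aws.String("Name"), Value: aws.String("example")}}, apiObject.Tags)
	assert.Equal(t, "arn:aws:ecs:us-east-1:123456789012:task-definition/example:1", aws.ToString(apiObject.TaskDefinitionArn))
}
```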
--- .changelog/31607.txt | 2 +- internal/service/pipes/target_parameters.go | 2185 ++++++++----------- website/docs/r/pipes_pipe.html.markdown | 49 +- 3 files changed, 984 insertions(+), 1252 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index e24dbcb8d10..ae99e2b8e3f 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -7,5 +7,5 @@ resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_para ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task_parameters`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block ``` \ No newline at end of file diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 540c30430fc..ae2414d1b70 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) @@ -25,7 +26,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -159,7 +160,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -187,7 +188,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "ecs_task": { + "ecs_task_parameters": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -211,17 +212,17 @@ func targetParametersSchema() *schema.Schema { MaxItems: 6, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "capacity_provider": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 255), - }, "base": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100000), Default: 0, }, + "capacity_provider": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, "weight": { Type: schema.TypeInt, Optional: true, @@ -304,7 +305,7 @@ func targetParametersSchema() *schema.Schema { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "container_overrides": { + 
"container_override": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ @@ -336,7 +337,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "environment_files": { + "environment_file": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ @@ -366,7 +367,7 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeString, Optional: true, }, - "resource_requirements": { + "resource_requirement": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ @@ -390,7 +391,7 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeString, Optional: true, }, - "ecs_ephemeral_storage": { + "ephemeral_storage": { Type: schema.TypeList, Optional: true, MaxItems: 1, @@ -409,7 +410,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, ValidateFunc: verify.ValidARN, }, - "inference_accelerator_overrides": { + "inference_accelerator_override": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ @@ -437,7 +438,7 @@ func targetParametersSchema() *schema.Schema { }, }, }, - "placement_constraints": { + "placement_constraint": { Type: schema.TypeList, Optional: true, MaxItems: 10, @@ -489,24 +490,7 @@ func targetParametersSchema() *schema.Schema { Optional: true, ValidateFunc: validation.StringLenBetween(1, 1024), }, - "tags": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, - "value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringLenBetween(1, 256), - }, - }, - }, - }, + "tags": tftags.TagsSchema(), "task_count": { Type: schema.TypeInt, Optional: true, @@ -527,7 +511,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -585,7 +569,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.kinesis_stream_parameters", "target_parameters.0.lambda_function_parameters", @@ -628,7 +612,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.lambda_function_parameters", @@ -654,7 +638,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -680,7 +664,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", 
"target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -734,7 +718,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -777,7 +761,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -808,7 +792,7 @@ func targetParametersSchema() *schema.Schema { ConflictsWith: []string{ "target_parameters.0.batch_job_parameters", "target_parameters.0.cloudwatch_logs_parameters", - "target_parameters.0.ecs_task", + "target_parameters.0.ecs_task_parameters", "target_parameters.0.eventbridge_event_bus_parameters", "target_parameters.0.http_parameters", "target_parameters.0.kinesis_stream_parameters", @@ -847,7 +831,9 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP apiObject.CloudWatchLogsParameters = expandPipeTargetCloudWatchLogsParameters(v[0].(map[string]interface{})) } - // TODO + if v, ok := tfMap["ecs_task_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EcsTaskParameters = expandPipeTargetEcsTaskParameters(v[0].(map[string]interface{})) + } if v, ok := tfMap["eventbridge_event_bus_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.EventBridgeEventBusParameters = expandPipeTargetEventBridgeEventBusParameters(v[0].(map[string]interface{})) @@ -933,7 +919,7 @@ func expandBatchArrayProperties(tfMap map[string]interface{}) *types.BatchArrayP apiObject := &types.BatchArrayProperties{} - if v, ok := tfMap["size"].(int); ok && v != 0 { + if v, ok := tfMap["size"].(int); ok { apiObject.Size = int32(v) } @@ -1105,7 +1091,7 @@ func expandBatchRetryStrategy(tfMap map[string]interface{}) *types.BatchRetryStr apiObject := &types.BatchRetryStrategy{} - if v, ok := tfMap["attempts"].(int); ok && v != 0 { + if v, ok := tfMap["attempts"].(int); ok { apiObject.Attempts = int32(v) } @@ -1130,158 +1116,248 @@ func expandPipeTargetCloudWatchLogsParameters(tfMap map[string]interface{}) *typ return apiObject } -func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) *types.PipeTargetEventBridgeEventBusParameters { +func expandPipeTargetEcsTaskParameters(tfMap map[string]interface{}) *types.PipeTargetEcsTaskParameters { if tfMap == nil { return nil } - apiObject := &types.PipeTargetEventBridgeEventBusParameters{} + apiObject := &types.PipeTargetEcsTaskParameters{} - if v, ok := tfMap["detail_type"].(string); ok && v != "" { - apiObject.DetailType = aws.String(v) + if v, ok := tfMap["capacity_provider_strategy"].([]interface{}); ok && len(v) > 0 { + apiObject.CapacityProviderStrategy = expandCapacityProviderStrategyItems(v) } - if v, ok := tfMap["endpoint_id"].(string); ok && v != "" { - apiObject.EndpointId = aws.String(v) 
+ if v, ok := tfMap["enable_ecs_managed_tags"].(bool); ok { + apiObject.EnableECSManagedTags = v } - if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Resources = flex.ExpandStringValueSet(v) + if v, ok := tfMap["enable_execute_command"].(bool); ok { + apiObject.EnableExecuteCommand = v } - if v, ok := tfMap["source"].(string); ok && v != "" { - apiObject.Source = aws.String(v) + if v, ok := tfMap["group"].(string); ok && v != "" { + apiObject.Group = aws.String(v) } - if v, ok := tfMap["time"].(string); ok && v != "" { - apiObject.Time = aws.String(v) + if v, ok := tfMap["launch_type"].(string); ok && v != "" { + apiObject.LaunchType = types.LaunchType(v) + } + + if v, ok := tfMap["network_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.NetworkConfiguration = expandNetworkConfiguration(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["overrides"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Overrides = expandEcsTaskOverride(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["placement_constraint"].([]interface{}); ok && len(v) > 0 { + apiObject.PlacementConstraints = expandPlacementConstraints(v) + } + + if v, ok := tfMap["placement_strategy"].([]interface{}); ok && len(v) > 0 { + apiObject.PlacementStrategy = expandPlacementStrategies(v) + } + + if v, ok := tfMap["platform_version"].(string); ok && v != "" { + apiObject.PlatformVersion = aws.String(v) + } + + if v, ok := tfMap["propagate_tags"].(string); ok && v != "" { + apiObject.PropagateTags = types.PropagateTags(v) + } + + if v, ok := tfMap["reference_id"].(string); ok && v != "" { + apiObject.ReferenceId = aws.String(v) + } + + if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { + for k, v := range flex.ExpandStringValueMap(v) { + apiObject.Tags = append(apiObject.Tags, types.Tag{Key: aws.String(k), Value: aws.String(v)}) + } + } + + if v, ok := tfMap["task_count"].(int); ok { + apiObject.TaskCount = aws.Int32(int32(v)) + } + + if v, ok := tfMap["task_definition_arn"].(string); ok && v != "" { + apiObject.TaskDefinitionArn = aws.String(v) } return apiObject } -func expandPipeTargetHttpParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { +func expandCapacityProviderStrategyItem(tfMap map[string]interface{}) *types.CapacityProviderStrategyItem { if tfMap == nil { return nil } - apiObject := &types.PipeTargetHttpParameters{} + apiObject := &types.CapacityProviderStrategyItem{} - if v, ok := tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.HeaderParameters = flex.ExpandStringValueMap(v) + if v, ok := tfMap["base"].(int); ok { + apiObject.Base = int32(v) } - if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { - apiObject.PathParameterValues = flex.ExpandStringValueList(v) + if v, ok := tfMap["capacity_provider"].(string); ok && v != "" { + apiObject.CapacityProvider = aws.String(v) } - if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) + if v, ok := tfMap["weight"].(int); ok { + apiObject.Weight = int32(v) } return apiObject } -func expandPipeTargetKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeTargetKinesisStreamParameters { +func expandCapacityProviderStrategyItems(tfList []interface{}) []types.CapacityProviderStrategyItem { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.CapacityProviderStrategyItem + + 
for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandCapacityProviderStrategyItem(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandNetworkConfiguration(tfMap map[string]interface{}) *types.NetworkConfiguration { if tfMap == nil { return nil } - apiObject := &types.PipeTargetKinesisStreamParameters{} + apiObject := &types.NetworkConfiguration{} - if v, ok := tfMap["partition_key"].(string); ok && v != "" { - apiObject.PartitionKey = aws.String(v) + if v, ok := tfMap["aws_vpc_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.AwsvpcConfiguration = expandAWSVPCConfiguration(v[0].(map[string]interface{})) } return apiObject } -func expandPipeTargetLambdaFunctionParameters(tfMap map[string]interface{}) *types.PipeTargetLambdaFunctionParameters { +func expandAWSVPCConfiguration(tfMap map[string]interface{}) *types.AwsVpcConfiguration { if tfMap == nil { return nil } - apiObject := &types.PipeTargetLambdaFunctionParameters{} + apiObject := &types.AwsVpcConfiguration{} - if v, ok := tfMap["invocation_type"].(string); ok && v != "" { - apiObject.InvocationType = types.PipeTargetInvocationType(v) + if v, ok := tfMap["assign_public_ip"].(string); ok && v != "" { + apiObject.AssignPublicIp = types.AssignPublicIp(v) + } + + if v, ok := tfMap["security_groups"].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["subnets"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Subnets = flex.ExpandStringValueSet(v) } return apiObject } -func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types.PipeTargetRedshiftDataParameters { +func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride { if tfMap == nil { return nil } - apiObject := &types.PipeTargetRedshiftDataParameters{} + apiObject := &types.EcsTaskOverride{} - if v, ok := tfMap["database"].(string); ok && v != "" { - apiObject.Database = aws.String(v) + if v, ok := tfMap["container_override"].([]interface{}); ok && len(v) > 0 { + apiObject.ContainerOverrides = expandEcsContainerOverrides(v) } - if v, ok := tfMap["db_user"].(string); ok && v != "" { - apiObject.DbUser = aws.String(v) + if v, ok := tfMap["cpu"].(string); ok && v != "" { + apiObject.Cpu = aws.String(v) } - if v, ok := tfMap["secret_manager_arn"].(string); ok && v != "" { - apiObject.SecretManagerArn = aws.String(v) + if v, ok := tfMap["ephemeral_storage"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.EphemeralStorage = expandEcsEphemeralStorage(v[0].(map[string]interface{})) } - if v, ok := tfMap["sqls"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Sqls = flex.ExpandStringValueSet(v) + if v, ok := tfMap["execution_role_arn"].(string); ok && v != "" { + apiObject.ExecutionRoleArn = aws.String(v) } - if v, ok := tfMap["statement_name"].(string); ok && v != "" { - apiObject.StatementName = aws.String(v) + if v, ok := tfMap["inference_accelerator_override"].([]interface{}); ok && len(v) > 0 { + apiObject.InferenceAcceleratorOverrides = expandEcsInferenceAcceleratorOverrides(v) } - if v, ok := tfMap["with_event"].(bool); ok { - apiObject.WithEvent = v + if v, ok := tfMap["memory"].(string); ok && v != "" { + apiObject.Memory = aws.String(v) + } + + if v, ok := tfMap["task_role_arn"].(string); ok && v != "" { + apiObject.TaskRoleArn = aws.String(v) } return apiObject } -func 
expandPipeTargetSageMakerPipelineParameters(tfMap map[string]interface{}) *types.PipeTargetSageMakerPipelineParameters { +func expandEcsContainerOverride(tfMap map[string]interface{}) *types.EcsContainerOverride { if tfMap == nil { return nil } - apiObject := &types.PipeTargetSageMakerPipelineParameters{} + apiObject := &types.EcsContainerOverride{} - if v, ok := tfMap["pipeline_parameter"].([]interface{}); ok && len(v) > 0 { - apiObject.PipelineParameterList = expandSageMakerPipelineParameters(v) + if v, ok := tfMap["command"].([]interface{}); ok && len(v) > 0 { + apiObject.Command = flex.ExpandStringValueList(v) } - return apiObject -} + if v, ok := tfMap["cpu"].(int); ok { + apiObject.Cpu = aws.Int32(int32(v)) + } -func expandSageMakerPipelineParameter(tfMap map[string]interface{}) *types.SageMakerPipelineParameter { - if tfMap == nil { - return nil + if v, ok := tfMap["environment"].([]interface{}); ok && len(v) > 0 { + apiObject.Environment = expandEcsEnvironmentVariables(v) } - apiObject := &types.SageMakerPipelineParameter{} + if v, ok := tfMap["environment_file"].([]interface{}); ok && len(v) > 0 { + apiObject.EnvironmentFiles = expandEcsEnvironmentFiles(v) + } + + if v, ok := tfMap["memory"].(int); ok { + apiObject.Memory = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_reservation"].(int); ok { + apiObject.MemoryReservation = aws.Int32(int32(v)) + } if v, ok := tfMap["name"].(string); ok && v != "" { apiObject.Name = aws.String(v) } - if v, ok := tfMap["value"].(string); ok && v != "" { - apiObject.Value = aws.String(v) + if v, ok := tfMap["resource_requirement"].([]interface{}); ok && len(v) > 0 { + apiObject.ResourceRequirements = expandEcsResourceRequirements(v) } return apiObject } -func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPipelineParameter { +func expandEcsContainerOverrides(tfList []interface{}) []types.EcsContainerOverride { if len(tfList) == 0 { return nil } - var apiObjects []types.SageMakerPipelineParameter + var apiObjects []types.EcsContainerOverride for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -1290,7 +1366,7 @@ func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPi continue } - apiObject := expandSageMakerPipelineParameter(tfMap) + apiObject := expandEcsContainerOverride(tfMap) if apiObject == nil { continue @@ -1302,419 +1378,657 @@ func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPi return apiObjects } -func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { +func expandEcsEnvironmentVariable(tfMap map[string]interface{}) *types.EcsEnvironmentVariable { if tfMap == nil { return nil } - apiObject := &types.PipeTargetSqsQueueParameters{} + apiObject := &types.EcsEnvironmentVariable{} - if v, ok := tfMap["message_deduplication_id"].(string); ok && v != "" { - apiObject.MessageDeduplicationId = aws.String(v) + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) } - if v, ok := tfMap["message_group_id"].(string); ok && v != "" { - apiObject.MessageGroupId = aws.String(v) + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) } return apiObject } -func expandPipeTargetStateMachineParameters(tfMap map[string]interface{}) *types.PipeTargetStateMachineParameters { - if tfMap == nil { +func expandEcsEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentVariable { + if len(tfList) == 0 { return nil } - apiObject 
:= &types.PipeTargetStateMachineParameters{} + var apiObjects []types.EcsEnvironmentVariable - if v, ok := tfMap["invocation_type"].(string); ok && v != "" { - apiObject.InvocationType = types.PipeTargetInvocationType(v) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandEcsEnvironmentVariable(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - return apiObject + return apiObjects } -func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} { - if apiObject == nil { +func expandEcsEnvironmentFile(tfMap map[string]interface{}) *types.EcsEnvironmentFile { + if tfMap == nil { return nil } - tfMap := map[string]interface{}{} + apiObject := &types.EcsEnvironmentFile{} - if v := apiObject.BatchJobParameters; v != nil { - tfMap["batch_job_parameters"] = []interface{}{flattenPipeTargetBatchJobParameters(v)} + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.EcsEnvironmentFileType(v) } - if v := apiObject.CloudWatchLogsParameters; v != nil { - tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetCloudWatchLogsParameters(v)} + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) } - // TODO + return apiObject +} - if v := apiObject.EventBridgeEventBusParameters; v != nil { - tfMap["eventbridge_event_bus_parameters"] = []interface{}{flattenPipeTargetEventBridgeEventBusParameters(v)} +func expandEcsEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile { + if len(tfList) == 0 { + return nil } - if v := apiObject.HttpParameters; v != nil { - tfMap["http_parameters"] = []interface{}{flattenPipeTargetHttpParameters(v)} - } + var apiObjects []types.EcsEnvironmentFile - if v := apiObject.InputTemplate; v != nil { - tfMap["input_template"] = aws.ToString(v) - } + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) - if v := apiObject.KinesisStreamParameters; v != nil { - tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeTargetKinesisStreamParameters(v)} - } + if !ok { + continue + } - if v := apiObject.LambdaFunctionParameters; v != nil { - tfMap["lambda_function_parameters"] = []interface{}{flattenPipeTargetLambdaFunctionParameters(v)} - } + apiObject := expandEcsEnvironmentFile(tfMap) - if v := apiObject.RedshiftDataParameters; v != nil { - tfMap["redshift_data_parameters"] = []interface{}{flattenPipeTargetRedshiftDataParameters(v)} + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - if v := apiObject.SageMakerPipelineParameters; v != nil { - tfMap["sagemaker_pipeline_parameters"] = []interface{}{flattenPipeTargetSageMakerPipelineParameters(v)} + return apiObjects +} + +func expandEcsResourceRequirement(tfMap map[string]interface{}) *types.EcsResourceRequirement { + if tfMap == nil { + return nil } - if v := apiObject.SqsQueueParameters; v != nil { - tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSqsQueueParameters(v)} + apiObject := &types.EcsResourceRequirement{} + + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.EcsResourceRequirementType(v) } - if v := apiObject.StepFunctionStateMachineParameters; v != nil { - tfMap["step_function_state_machine_parameters"] = []interface{}{flattenPipeTargetStateMachineParameters(v)} + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) } - return tfMap + 
return apiObject } -func flattenPipeTargetBatchJobParameters(apiObject *types.PipeTargetBatchJobParameters) map[string]interface{} { - if apiObject == nil { +func expandEcsResourceRequirements(tfList []interface{}) []types.EcsResourceRequirement { + if len(tfList) == 0 { return nil } - tfMap := map[string]interface{}{} + var apiObjects []types.EcsResourceRequirement - if v := apiObject.ArrayProperties; v != nil { - tfMap["array_properties"] = []interface{}{flattenBatchArrayProperties(v)} - } + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) - if v := apiObject.ContainerOverrides; v != nil { - tfMap["container_overrides"] = []interface{}{flattenBatchContainerOverrides(v)} - } + if !ok { + continue + } - if v := apiObject.DependsOn; v != nil { - tfMap["depends_on"] = flattenBatchJobDependencies(v) - } + apiObject := expandEcsResourceRequirement(tfMap) - if v := apiObject.JobDefinition; v != nil { - tfMap["job_definition"] = aws.ToString(v) + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - if v := apiObject.JobName; v != nil { - tfMap["job_name"] = aws.ToString(v) + return apiObjects +} + +func expandEcsEphemeralStorage(tfMap map[string]interface{}) *types.EcsEphemeralStorage { + if tfMap == nil { + return nil } - if v := apiObject.Parameters; v != nil { - tfMap["parameters"] = v + apiObject := &types.EcsEphemeralStorage{} + + if v, ok := tfMap["size_in_gib"].(int); ok { + apiObject.SizeInGiB = int32(v) } - if v := apiObject.RetryStrategy; v != nil { - tfMap["retry_strategy"] = []interface{}{flattenBatchRetryStrategy(v)} + return apiObject +} + +func expandEcsInferenceAcceleratorOverride(tfMap map[string]interface{}) *types.EcsInferenceAcceleratorOverride { + if tfMap == nil { + return nil } - return tfMap + apiObject := &types.EcsInferenceAcceleratorOverride{} + + if v, ok := tfMap["device_name"].(string); ok && v != "" { + apiObject.DeviceName = aws.String(v) + } + + if v, ok := tfMap["device_type"].(string); ok && v != "" { + apiObject.DeviceType = aws.String(v) + } + + return apiObject } -func flattenBatchArrayProperties(apiObject *types.BatchArrayProperties) map[string]interface{} { - if apiObject == nil { +func expandEcsInferenceAcceleratorOverrides(tfList []interface{}) []types.EcsInferenceAcceleratorOverride { + if len(tfList) == 0 { return nil } - tfMap := map[string]interface{}{} + var apiObjects []types.EcsInferenceAcceleratorOverride - if v := apiObject.Size; v != 0 { - tfMap["size"] = int(v) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandEcsInferenceAcceleratorOverride(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - return tfMap + return apiObjects } -func flattenBatchContainerOverrides(apiObject *types.BatchContainerOverrides) map[string]interface{} { - if apiObject == nil { +func expandPlacementConstraint(tfMap map[string]interface{}) *types.PlacementConstraint { + if tfMap == nil { return nil } - tfMap := map[string]interface{}{} + apiObject := &types.PlacementConstraint{} - if v := apiObject.Command; v != nil { - tfMap["command"] = v + if v, ok := tfMap["expression"].(string); ok && v != "" { + apiObject.Expression = aws.String(v) } - if v := apiObject.Environment; v != nil { - tfMap["environment"] = flattenBatchEnvironmentVariables(v) + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.PlacementConstraintType(v) } - if v := 
apiObject.InstanceType; v != nil { - tfMap["instance_type"] = aws.ToString(v) + return apiObject +} + +func expandPlacementConstraints(tfList []interface{}) []types.PlacementConstraint { + if len(tfList) == 0 { + return nil } - if v := apiObject.ResourceRequirements; v != nil { - tfMap["resource_requirement"] = flattenBatchResourceRequirements(v) + var apiObjects []types.PlacementConstraint + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandPlacementConstraint(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - return tfMap + return apiObjects } -func flattenBatchEnvironmentVariable(apiObject types.BatchEnvironmentVariable) map[string]interface{} { - tfMap := map[string]interface{}{} +func expandPlacementStrategy(tfMap map[string]interface{}) *types.PlacementStrategy { + if tfMap == nil { + return nil + } - if v := apiObject.Name; v != nil { - tfMap["name"] = v + apiObject := &types.PlacementStrategy{} + + if v, ok := tfMap["field"].(string); ok && v != "" { + apiObject.Field = aws.String(v) } - if v := apiObject.Value; v != nil { - tfMap["value"] = aws.ToString(v) + if v, ok := tfMap["type"].(string); ok && v != "" { + apiObject.Type = types.PlacementStrategyType(v) } - return tfMap + return apiObject } -func flattenBatchEnvironmentVariables(apiObjects []types.BatchEnvironmentVariable) []interface{} { - if len(apiObjects) == 0 { +func expandPlacementStrategies(tfList []interface{}) []types.PlacementStrategy { + if len(tfList) == 0 { return nil } - var tfList []interface{} + var apiObjects []types.PlacementStrategy - for _, apiObject := range apiObjects { - tfList = append(tfList, flattenBatchEnvironmentVariable(apiObject)) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandPlacementStrategy(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) } - return tfList + return apiObjects } -func flattenBatchResourceRequirement(apiObject types.BatchResourceRequirement) map[string]interface{} { - tfMap := map[string]interface{}{} +func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) *types.PipeTargetEventBridgeEventBusParameters { + if tfMap == nil { + return nil + } - if v := apiObject.Type; v != "" { - tfMap["type"] = v + apiObject := &types.PipeTargetEventBridgeEventBusParameters{} + + if v, ok := tfMap["detail_type"].(string); ok && v != "" { + apiObject.DetailType = aws.String(v) } - if v := apiObject.Value; v != nil { - tfMap["value"] = aws.ToString(v) + if v, ok := tfMap["endpoint_id"].(string); ok && v != "" { + apiObject.EndpointId = aws.String(v) } - return tfMap + if v, ok := tfMap["resources"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Resources = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["source"].(string); ok && v != "" { + apiObject.Source = aws.String(v) + } + + if v, ok := tfMap["time"].(string); ok && v != "" { + apiObject.Time = aws.String(v) + } + + return apiObject } -func flattenBatchResourceRequirements(apiObjects []types.BatchResourceRequirement) []interface{} { - if len(apiObjects) == 0 { +func expandPipeTargetHttpParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { + if tfMap == nil { return nil } - var tfList []interface{} + apiObject := &types.PipeTargetHttpParameters{} - for _, apiObject := range apiObjects { - tfList = append(tfList, 
flattenBatchResourceRequirement(apiObject)) + if v, ok := tfMap["header_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.HeaderParameters = flex.ExpandStringValueMap(v) } - return tfList + if v, ok := tfMap["path_parameter_values"].([]interface{}); ok && len(v) > 0 { + apiObject.PathParameterValues = flex.ExpandStringValueList(v) + } + + if v, ok := tfMap["query_string_parameters"].(map[string]interface{}); ok && len(v) > 0 { + apiObject.QueryStringParameters = flex.ExpandStringValueMap(v) + } + + return apiObject } -func flattenBatchJobDependency(apiObject types.BatchJobDependency) map[string]interface{} { - tfMap := map[string]interface{}{} +func expandPipeTargetKinesisStreamParameters(tfMap map[string]interface{}) *types.PipeTargetKinesisStreamParameters { + if tfMap == nil { + return nil + } - if v := apiObject.JobId; v != nil { - tfMap["job_id"] = aws.ToString(v) + apiObject := &types.PipeTargetKinesisStreamParameters{} + + if v, ok := tfMap["partition_key"].(string); ok && v != "" { + apiObject.PartitionKey = aws.String(v) } - if v := apiObject.Type; v != "" { - tfMap["type"] = v + return apiObject +} + +func expandPipeTargetLambdaFunctionParameters(tfMap map[string]interface{}) *types.PipeTargetLambdaFunctionParameters { + if tfMap == nil { + return nil } - return tfMap + apiObject := &types.PipeTargetLambdaFunctionParameters{} + + if v, ok := tfMap["invocation_type"].(string); ok && v != "" { + apiObject.InvocationType = types.PipeTargetInvocationType(v) + } + + return apiObject } -func flattenBatchJobDependencies(apiObjects []types.BatchJobDependency) []interface{} { - if len(apiObjects) == 0 { +func expandPipeTargetRedshiftDataParameters(tfMap map[string]interface{}) *types.PipeTargetRedshiftDataParameters { + if tfMap == nil { return nil } - var tfList []interface{} + apiObject := &types.PipeTargetRedshiftDataParameters{} - for _, apiObject := range apiObjects { - tfList = append(tfList, flattenBatchJobDependency(apiObject)) + if v, ok := tfMap["database"].(string); ok && v != "" { + apiObject.Database = aws.String(v) } - return tfList + if v, ok := tfMap["db_user"].(string); ok && v != "" { + apiObject.DbUser = aws.String(v) + } + + if v, ok := tfMap["secret_manager_arn"].(string); ok && v != "" { + apiObject.SecretManagerArn = aws.String(v) + } + + if v, ok := tfMap["sqls"].(*schema.Set); ok && v.Len() > 0 { + apiObject.Sqls = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap["statement_name"].(string); ok && v != "" { + apiObject.StatementName = aws.String(v) + } + + if v, ok := tfMap["with_event"].(bool); ok { + apiObject.WithEvent = v + } + + return apiObject } -func flattenBatchRetryStrategy(apiObject *types.BatchRetryStrategy) map[string]interface{} { - if apiObject == nil { +func expandPipeTargetSageMakerPipelineParameters(tfMap map[string]interface{}) *types.PipeTargetSageMakerPipelineParameters { + if tfMap == nil { return nil } - tfMap := map[string]interface{}{} + apiObject := &types.PipeTargetSageMakerPipelineParameters{} - if v := apiObject.Attempts; v != 0 { - tfMap["attempts"] = int(v) + if v, ok := tfMap["pipeline_parameter"].([]interface{}); ok && len(v) > 0 { + apiObject.PipelineParameterList = expandSageMakerPipelineParameters(v) } - return tfMap + return apiObject } -func flattenPipeTargetCloudWatchLogsParameters(apiObject *types.PipeTargetCloudWatchLogsParameters) map[string]interface{} { - if apiObject == nil { +func expandSageMakerPipelineParameter(tfMap map[string]interface{}) *types.SageMakerPipelineParameter { + if tfMap == 
nil { return nil } - tfMap := map[string]interface{}{} + apiObject := &types.SageMakerPipelineParameter{} - if v := apiObject.LogStreamName; v != nil { - tfMap["log_stream_name"] = aws.ToString(v) + if v, ok := tfMap["name"].(string); ok && v != "" { + apiObject.Name = aws.String(v) } - if v := apiObject.Timestamp; v != nil { - tfMap["timestamp"] = aws.ToString(v) + if v, ok := tfMap["value"].(string); ok && v != "" { + apiObject.Value = aws.String(v) } - return tfMap + return apiObject } -func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetEventBridgeEventBusParameters) map[string]interface{} { +func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPipelineParameter { + if len(tfList) == 0 { + return nil + } + + var apiObjects []types.SageMakerPipelineParameter + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandSageMakerPipelineParameter(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, *apiObject) + } + + return apiObjects +} + +func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetSqsQueueParameters{} + + if v, ok := tfMap["message_deduplication_id"].(string); ok && v != "" { + apiObject.MessageDeduplicationId = aws.String(v) + } + + if v, ok := tfMap["message_group_id"].(string); ok && v != "" { + apiObject.MessageGroupId = aws.String(v) + } + + return apiObject +} + +func expandPipeTargetStateMachineParameters(tfMap map[string]interface{}) *types.PipeTargetStateMachineParameters { + if tfMap == nil { + return nil + } + + apiObject := &types.PipeTargetStateMachineParameters{} + + if v, ok := tfMap["invocation_type"].(string); ok && v != "" { + apiObject.InvocationType = types.PipeTargetInvocationType(v) + } + + return apiObject +} + +func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.DetailType; v != nil { - tfMap["detail_type"] = aws.ToString(v) + if v := apiObject.BatchJobParameters; v != nil { + tfMap["batch_job_parameters"] = []interface{}{flattenPipeTargetBatchJobParameters(v)} } - if v := apiObject.EndpointId; v != nil { - tfMap["endpoint_id"] = aws.ToString(v) + if v := apiObject.CloudWatchLogsParameters; v != nil { + tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetCloudWatchLogsParameters(v)} } - if v := apiObject.Resources; v != nil { - tfMap["resources"] = v + if v := apiObject.EcsTaskParameters; v != nil { + tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetEcsTaskParameters(v)} } - if v := apiObject.Source; v != nil { - tfMap["source"] = aws.ToString(v) + if v := apiObject.EventBridgeEventBusParameters; v != nil { + tfMap["eventbridge_event_bus_parameters"] = []interface{}{flattenPipeTargetEventBridgeEventBusParameters(v)} + } + + if v := apiObject.HttpParameters; v != nil { + tfMap["http_parameters"] = []interface{}{flattenPipeTargetHttpParameters(v)} + } + + if v := apiObject.InputTemplate; v != nil { + tfMap["input_template"] = aws.ToString(v) + } + + if v := apiObject.KinesisStreamParameters; v != nil { + tfMap["kinesis_stream_parameters"] = []interface{}{flattenPipeTargetKinesisStreamParameters(v)} + } + + if v := apiObject.LambdaFunctionParameters; v != nil { + tfMap["lambda_function_parameters"] = 
[]interface{}{flattenPipeTargetLambdaFunctionParameters(v)} + } + + if v := apiObject.RedshiftDataParameters; v != nil { + tfMap["redshift_data_parameters"] = []interface{}{flattenPipeTargetRedshiftDataParameters(v)} + } + + if v := apiObject.SageMakerPipelineParameters; v != nil { + tfMap["sagemaker_pipeline_parameters"] = []interface{}{flattenPipeTargetSageMakerPipelineParameters(v)} + } + + if v := apiObject.SqsQueueParameters; v != nil { + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSqsQueueParameters(v)} } - if v := apiObject.Time; v != nil { - tfMap["time"] = aws.ToString(v) + if v := apiObject.StepFunctionStateMachineParameters; v != nil { + tfMap["step_function_state_machine_parameters"] = []interface{}{flattenPipeTargetStateMachineParameters(v)} } return tfMap } -func flattenPipeTargetHttpParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { +func flattenPipeTargetBatchJobParameters(apiObject *types.PipeTargetBatchJobParameters) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.HeaderParameters; v != nil { - tfMap["header_parameters"] = v + if v := apiObject.ArrayProperties; v != nil { + tfMap["array_properties"] = []interface{}{flattenBatchArrayProperties(v)} } - if v := apiObject.PathParameterValues; v != nil { - tfMap["path_parameter_values"] = v + if v := apiObject.ContainerOverrides; v != nil { + tfMap["container_overrides"] = []interface{}{flattenBatchContainerOverrides(v)} } - if v := apiObject.QueryStringParameters; v != nil { - tfMap["query_string_parameters"] = v + if v := apiObject.DependsOn; v != nil { + tfMap["depends_on"] = flattenBatchJobDependencies(v) } - return tfMap -} + if v := apiObject.JobDefinition; v != nil { + tfMap["job_definition"] = aws.ToString(v) + } -func flattenPipeTargetKinesisStreamParameters(apiObject *types.PipeTargetKinesisStreamParameters) map[string]interface{} { - if apiObject == nil { - return nil + if v := apiObject.JobName; v != nil { + tfMap["job_name"] = aws.ToString(v) } - tfMap := map[string]interface{}{} + if v := apiObject.Parameters; v != nil { + tfMap["parameters"] = v + } - if v := apiObject.PartitionKey; v != nil { - tfMap["partition_key"] = aws.ToString(v) + if v := apiObject.RetryStrategy; v != nil { + tfMap["retry_strategy"] = []interface{}{flattenBatchRetryStrategy(v)} } return tfMap } -func flattenPipeTargetLambdaFunctionParameters(apiObject *types.PipeTargetLambdaFunctionParameters) map[string]interface{} { +func flattenBatchArrayProperties(apiObject *types.BatchArrayProperties) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.InvocationType; v != "" { - tfMap["invocation_type"] = v + if v := apiObject.Size; v != 0 { + tfMap["size"] = int(v) } return tfMap } -func flattenPipeTargetRedshiftDataParameters(apiObject *types.PipeTargetRedshiftDataParameters) map[string]interface{} { +func flattenBatchContainerOverrides(apiObject *types.BatchContainerOverrides) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{ - "with_event": apiObject.WithEvent, + tfMap := map[string]interface{}{} + + if v := apiObject.Command; v != nil { + tfMap["command"] = v } - if v := apiObject.Database; v != nil { - tfMap["database"] = aws.ToString(v) + if v := apiObject.Environment; v != nil { + tfMap["environment"] = flattenBatchEnvironmentVariables(v) } - if v := apiObject.DbUser; v != nil { - tfMap["db_user"] = aws.ToString(v) + if v := 
apiObject.InstanceType; v != nil { + tfMap["instance_type"] = aws.ToString(v) } - if v := apiObject.SecretManagerArn; v != nil { - tfMap["secret_manager_arn"] = aws.ToString(v) + if v := apiObject.ResourceRequirements; v != nil { + tfMap["resource_requirement"] = flattenBatchResourceRequirements(v) } - if v := apiObject.Sqls; v != nil { - tfMap["sqls"] = v + return tfMap +} + +func flattenBatchEnvironmentVariable(apiObject types.BatchEnvironmentVariable) map[string]interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) } - if v := apiObject.StatementName; v != nil { - tfMap["statement_name"] = aws.ToString(v) + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) } return tfMap } -func flattenPipeTargetSageMakerPipelineParameters(apiObject *types.PipeTargetSageMakerPipelineParameters) map[string]interface{} { - if apiObject == nil { +func flattenBatchEnvironmentVariables(apiObjects []types.BatchEnvironmentVariable) []interface{} { + if len(apiObjects) == 0 { return nil } - tfMap := map[string]interface{}{} + var tfList []interface{} - if v := apiObject.PipelineParameterList; v != nil { - tfMap["pipeline_parameter"] = flattenSageMakerPipelineParameters(v) + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchEnvironmentVariable(apiObject)) } - return tfMap + return tfList } -func flattenSageMakerPipelineParameter(apiObject types.SageMakerPipelineParameter) map[string]interface{} { +func flattenBatchResourceRequirement(apiObject types.BatchResourceRequirement) map[string]interface{} { tfMap := map[string]interface{}{} - if v := apiObject.Name; v != nil { - tfMap["name"] = aws.ToString(v) + if v := apiObject.Type; v != "" { + tfMap["type"] = v } if v := apiObject.Value; v != nil { @@ -1724,7 +2038,7 @@ func flattenSageMakerPipelineParameter(apiObject types.SageMakerPipelineParamete return tfMap } -func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelineParameter) []interface{} { +func flattenBatchResourceRequirements(apiObjects []types.BatchResourceRequirement) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1732,1235 +2046,658 @@ func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelinePara var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenSageMakerPipelineParameter(apiObject)) + tfList = append(tfList, flattenBatchResourceRequirement(apiObject)) } return tfList } -func flattenPipeTargetSqsQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenBatchJobDependency(apiObject types.BatchJobDependency) map[string]interface{} { tfMap := map[string]interface{}{} - if v := apiObject.MessageDeduplicationId; v != nil { - tfMap["message_deduplication_id"] = aws.ToString(v) + if v := apiObject.JobId; v != nil { + tfMap["job_id"] = aws.ToString(v) } - if v := apiObject.MessageGroupId; v != nil { - tfMap["message_group_id"] = aws.ToString(v) + if v := apiObject.Type; v != "" { + tfMap["type"] = v } return tfMap } -func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMachineParameters) map[string]interface{} { - if apiObject == nil { +func flattenBatchJobDependencies(apiObjects []types.BatchJobDependency) []interface{} { + if len(apiObjects) == 0 { return nil } - tfMap := map[string]interface{}{} + var tfList []interface{} - if v := apiObject.InvocationType; v != "" { - 
tfMap["invocation_type"] = v + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenBatchJobDependency(apiObject)) } - return tfMap + return tfList } -/* -func expandTargetParameters(config []interface{}) *types.PipeTargetParameters { - if len(config) == 0 { +func flattenBatchRetryStrategy(apiObject *types.BatchRetryStrategy) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.PipeTargetParameters - for _, c := range config { - param, ok := c.(map[string]interface{}) - if !ok { - return nil - } - - if val, ok := param["batch_target"]; ok { - parameters.BatchJobParameters = expandTargetBatchJobParameters(val.([]interface{})) - } - - if val, ok := param["cloudwatch_logs"]; ok { - parameters.CloudWatchLogsParameters = expandTargetCloudWatchLogsParameters(val.([]interface{})) - } - - if val, ok := param["ecs_task"]; ok { - parameters.EcsTaskParameters = expandTargetEcsTaskParameters(val.([]interface{})) - } - - if val, ok := param["event_bridge_event_bus"]; ok { - parameters.EventBridgeEventBusParameters = expandTargetEventBridgeEventBusParameters(val.([]interface{})) - } - - if val, ok := param["http_parameters"]; ok { - parameters.HttpParameters = expandTargetHTTPParameters(val.([]interface{})) - } - - if val, ok := param["input_template"].(string); ok && val != "" { - parameters.InputTemplate = aws.String(val) - } - - if val, ok := param["kinesis_stream"]; ok { - parameters.KinesisStreamParameters = expandTargetKinesisStreamParameters(val.([]interface{})) - } - - if val, ok := param["lambda_function"]; ok { - parameters.LambdaFunctionParameters = expandTargetLambdaFunctionParameters(val.([]interface{})) - } - - if val, ok := param["redshift_data"]; ok { - parameters.RedshiftDataParameters = expandTargetRedshiftDataParameters(val.([]interface{})) - } - - if val, ok := param["sagemaker_pipeline"]; ok { - parameters.SageMakerPipelineParameters = expandTargetSageMakerPipelineParameters(val.([]interface{})) - } - - if val, ok := param["sqs_queue"]; ok { - parameters.SqsQueueParameters = expandTargetSqsQueueParameters(val.([]interface{})) - } + tfMap := map[string]interface{}{} - if val, ok := param["step_function"]; ok { - parameters.StepFunctionStateMachineParameters = expandTargetStepFunctionStateMachineParameters(val.([]interface{})) - } + if v := apiObject.Attempts; v != 0 { + tfMap["attempts"] = int(v) } - return ¶meters + + return tfMap } -func expandTargetBatchJobParameters(config []interface{}) *types.PipeTargetBatchJobParameters { - if len(config) == 0 { +func flattenPipeTargetCloudWatchLogsParameters(apiObject *types.PipeTargetCloudWatchLogsParameters) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.PipeTargetBatchJobParameters - for _, c := range config { - param := c.(map[string]interface{}) - - parameters.JobDefinition = expandString("job_definition", param) - parameters.JobName = expandString("job_name", param) - if val, ok := param["retry_strategy"]; ok { - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if attempts, ok := valueParam["attempts"].(int32); ok { - parameters.RetryStrategy = &types.BatchRetryStrategy{ - Attempts: attempts, - } - } - } - } - } - if val, ok := param["array_properties"]; ok { - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if size, ok := valueParam["size"].(int32); ok { - parameters.ArrayProperties = 
&types.BatchArrayProperties{ - Size: size, - } - } - } - } - } - - if val, ok := param["parameters"]; ok { - batchTargetParameters := map[string]string{} - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if key, ok := valueParam["key"].(string); ok && key != "" { - if value, ok := valueParam["value"].(string); ok && value != "" { - batchTargetParameters[key] = value - } - } - } - } - if len(batchTargetParameters) > 0 { - parameters.Parameters = batchTargetParameters - } - } + tfMap := map[string]interface{}{} - if val, ok := param["depends_on"]; ok { - var dependsOn []types.BatchJobDependency - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var dependency types.BatchJobDependency - dependency.JobId = expandString("job_id", valueParam) - dependancyType := expandStringValue("type", valueParam) - if dependancyType != "" { - dependency.Type = types.BatchJobDependencyType(dependancyType) - } - dependsOn = append(dependsOn, dependency) - } - } - if len(dependsOn) > 0 { - parameters.DependsOn = dependsOn - } - } + if v := apiObject.LogStreamName; v != nil { + tfMap["log_stream_name"] = aws.ToString(v) + } - if val, ok := param["container_overrides"]; ok { - parameters.ContainerOverrides = expandTargetBatchContainerOverrides(val.([]interface{})) - } + if v := apiObject.Timestamp; v != nil { + tfMap["timestamp"] = aws.ToString(v) } - return ¶meters + return tfMap } -func expandTargetBatchContainerOverrides(config []interface{}) *types.BatchContainerOverrides { - if len(config) == 0 { +func flattenPipeTargetEcsTaskParameters(apiObject *types.PipeTargetEcsTaskParameters) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.BatchContainerOverrides - for _, c := range config { - param := c.(map[string]interface{}) - if value, ok := param["command"]; ok { - parameters.Command = flex.ExpandStringValueList(value.([]interface{})) - } - parameters.InstanceType = expandString("instance_type", param) - - if val, ok := param["environment"]; ok { - var environment []types.BatchEnvironmentVariable - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var env types.BatchEnvironmentVariable - env.Name = expandString("name", valueParam) - env.Value = expandString("value", valueParam) - environment = append(environment, env) - } - } - if len(environment) > 0 { - parameters.Environment = environment - } - } - - if val, ok := param["resource_requirements"]; ok { - var resourceRequirements []types.BatchResourceRequirement - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var resourceRequirement types.BatchResourceRequirement - resourceRequirementType := expandStringValue("type", valueParam) - if resourceRequirementType != "" { - resourceRequirement.Type = types.BatchResourceRequirementType(resourceRequirementType) - } - resourceRequirement.Value = expandString("value", valueParam) - resourceRequirements = append(resourceRequirements, resourceRequirement) - } - } - if len(resourceRequirements) > 0 { - parameters.ResourceRequirements = resourceRequirements - } - } + tfMap := map[string]interface{}{ + "enable_ecs_managed_tags": apiObject.EnableECSManagedTags, + "enable_execute_command": apiObject.EnableExecuteCommand, } - return ¶meters -} - -func expandTargetCloudWatchLogsParameters(config []interface{}) 
*types.PipeTargetCloudWatchLogsParameters { - if len(config) == 0 { - return nil + if v := apiObject.CapacityProviderStrategy; v != nil { + tfMap["capacity_provider_strategy"] = flattenCapacityProviderStrategyItems(v) } - var parameters types.PipeTargetCloudWatchLogsParameters - for _, c := range config { - param := c.(map[string]interface{}) + if v := apiObject.Group; v != nil { + tfMap["group"] = aws.ToString(v) + } - parameters.LogStreamName = expandString("log_stream_name", param) - parameters.Timestamp = expandString("timestamp", param) + if v := apiObject.LaunchType; v != "" { + tfMap["launch_type"] = v } - return ¶meters -} + if v := apiObject.NetworkConfiguration; v != nil { + tfMap["network_configuration"] = []interface{}{flattenNetworkConfiguration(v)} + } -func expandTargetEcsTaskParameters(config []interface{}) *types.PipeTargetEcsTaskParameters { - if len(config) == 0 { - return nil + if v := apiObject.Overrides; v != nil { + tfMap["overrides"] = []interface{}{flattenEcsTaskOverride(v)} } - var parameters types.PipeTargetEcsTaskParameters - for _, c := range config { - param := c.(map[string]interface{}) + if v := apiObject.PlacementConstraints; v != nil { + tfMap["placement_constraint"] = flattenPlacementConstraints(v) + } - parameters.TaskDefinitionArn = expandString("task_definition_arn", param) - parameters.EnableECSManagedTags = expandBool("enable_ecs_managed_tags", param) - parameters.EnableExecuteCommand = expandBool("enable_execute_command", param) - parameters.Group = expandString("group", param) - launchType := expandStringValue("launch_type", param) - if launchType != "" { - parameters.LaunchType = types.LaunchType(launchType) - } - parameters.PlatformVersion = expandString("platform_version", param) - propagateTags := expandStringValue("propagate_tags", param) - if propagateTags != "" { - parameters.PropagateTags = types.PropagateTags(propagateTags) - } - parameters.ReferenceId = expandString("reference_id", param) - parameters.TaskCount = expandInt32("task_count", param) + if v := apiObject.PlacementStrategy; v != nil { + tfMap["placement_strategy"] = flattenPlacementStrategies(v) + } - if val, ok := param["capacity_provider_strategy"]; ok { - parameters.CapacityProviderStrategy = expandTargetCapacityProviderStrategy(val.([]interface{})) - } - if val, ok := param["network_configuration"]; ok { - parameters.NetworkConfiguration = expandTargetNetworkConfiguration(val.([]interface{})) - } - if val, ok := param["placement_constraints"]; ok { - parameters.PlacementConstraints = expandTargetPlacementConstraints(val.([]interface{})) - } - if val, ok := param["placement_strategy"]; ok { - parameters.PlacementStrategy = expandTargetPlacementStrategies(val.([]interface{})) - } - if val, ok := param["tags"]; ok { - parameters.Tags = expandTargetECSTaskTags(val.([]interface{})) - } - if val, ok := param["overrides"]; ok { - parameters.Overrides = expandTargetECSTaskOverrides(val.([]interface{})) - } + if v := apiObject.PlatformVersion; v != nil { + tfMap["platform_version"] = aws.ToString(v) } - return ¶meters -} + if v := apiObject.PropagateTags; v != "" { + tfMap["propagate_tags"] = v + } -func expandTargetCapacityProviderStrategy(config []interface{}) []types.CapacityProviderStrategyItem { - if len(config) == 0 { - return nil + if v := apiObject.ReferenceId; v != nil { + tfMap["reference_id"] = aws.ToString(v) } - var parameters []types.CapacityProviderStrategyItem - for _, c := range config { - param := c.(map[string]interface{}) + if v := apiObject.Tags; v != nil { + 
tags := map[string]interface{}{} - var provider types.CapacityProviderStrategyItem - provider.CapacityProvider = expandString("capacity_provider", param) - base := expandInt32("base", param) - if base != nil { - provider.Base = aws.ToInt32(base) - } - weight := expandInt32("weight", param) - if weight != nil { - provider.Weight = aws.ToInt32(weight) + for _, apiObject := range v { + tags[aws.ToString(apiObject.Key)] = aws.ToString(apiObject.Value) } - parameters = append(parameters, provider) + tfMap["tags"] = tags } - return parameters -} - -func expandTargetNetworkConfiguration(config []interface{}) *types.NetworkConfiguration { - if len(config) == 0 { - return nil + if v := apiObject.TaskCount; v != nil { + tfMap["task_count"] = aws.ToInt32(v) } - var parameters types.NetworkConfiguration - for _, c := range config { - param := c.(map[string]interface{}) - - if val, ok := param["aws_vpc_configuration"]; ok { - parameters.AwsvpcConfiguration = expandTargetAWSVPCConfiguration(val.([]interface{})) - } + if v := apiObject.TaskDefinitionArn; v != nil { + tfMap["task_definition_arn"] = aws.ToString(v) } - return ¶meters + return tfMap } -func expandTargetAWSVPCConfiguration(config []interface{}) *types.AwsVpcConfiguration { - if len(config) == 0 { - return nil +func flattenCapacityProviderStrategyItem(apiObject types.CapacityProviderStrategyItem) map[string]interface{} { + tfMap := map[string]interface{}{ + "base": apiObject.Base, + "weight": apiObject.Weight, } - var parameters types.AwsVpcConfiguration - for _, c := range config { - param := c.(map[string]interface{}) - assignPublicIp := expandStringValue("assign_public_ip", param) - if assignPublicIp != "" { - parameters.AssignPublicIp = types.AssignPublicIp(assignPublicIp) - } - - if value, ok := param["security_groups"]; ok && value.(*schema.Set).Len() > 0 { - parameters.SecurityGroups = flex.ExpandStringValueSet(value.(*schema.Set)) - } - - if value, ok := param["subnets"]; ok && value.(*schema.Set).Len() > 0 { - parameters.Subnets = flex.ExpandStringValueSet(value.(*schema.Set)) - } + if v := apiObject.CapacityProvider; v != nil { + tfMap["capacity_provider"] = aws.ToString(v) } - return ¶meters + return tfMap } -func expandTargetPlacementConstraints(config []interface{}) []types.PlacementConstraint { - if len(config) == 0 { +func flattenCapacityProviderStrategyItems(apiObjects []types.CapacityProviderStrategyItem) []interface{} { + if len(apiObjects) == 0 { return nil } - var parameters []types.PlacementConstraint - for _, c := range config { - param := c.(map[string]interface{}) - - var constraint types.PlacementConstraint - constraint.Expression = expandString("expression", param) - constraintType := expandStringValue("type", param) - if constraintType != "" { - constraint.Type = types.PlacementConstraintType(constraintType) - } + var tfList []interface{} - parameters = append(parameters, constraint) + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenCapacityProviderStrategyItem(apiObject)) } - return parameters + return tfList } -func expandTargetPlacementStrategies(config []interface{}) []types.PlacementStrategy { - if len(config) == 0 { +func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interface{} { + if apiObject == nil { return nil } - var parameters []types.PlacementStrategy - for _, c := range config { - param := c.(map[string]interface{}) - - var strategy types.PlacementStrategy - strategy.Field = expandString("field", param) - strategyType := expandStringValue("type", param) - 
if strategyType != "" { - strategy.Type = types.PlacementStrategyType(strategyType) - } + tfMap := map[string]interface{}{} - parameters = append(parameters, strategy) + if v := apiObject.ContainerOverrides; v != nil { + tfMap["container_override"] = flattenEcsContainerOverrides(v) } - return parameters -} - -func expandTargetECSTaskTags(config []interface{}) []types.Tag { - if len(config) == 0 { - return nil + if v := apiObject.Cpu; v != nil { + tfMap["cpu"] = aws.ToString(v) } - var parameters []types.Tag - for _, c := range config { - param := c.(map[string]interface{}) - - var tag types.Tag - tag.Key = expandString("key", param) - tag.Value = expandString("value", param) - - parameters = append(parameters, tag) + if v := apiObject.EphemeralStorage; v != nil { + tfMap["ephemeral_storage"] = []interface{}{flattenEcsEphemeralStorage(v)} } - return parameters -} + if v := apiObject.ExecutionRoleArn; v != nil { + tfMap["execution_role_arn"] = aws.ToString(v) + } -func expandTargetECSTaskOverrides(config []interface{}) *types.EcsTaskOverride { - if len(config) == 0 { - return nil + if v := apiObject.InferenceAcceleratorOverrides; v != nil { + tfMap["inference_accelerator_override"] = flattenEcsInferenceAcceleratorOverrides(v) } - var parameters types.EcsTaskOverride - for _, c := range config { - param := c.(map[string]interface{}) - parameters.Cpu = expandString("cpu", param) - parameters.Memory = expandString("memory", param) - parameters.ExecutionRoleArn = expandString("execution_role_arn", param) - parameters.TaskRoleArn = expandString("task_role_arn", param) + if v := apiObject.Memory; v != nil { + tfMap["memory"] = aws.ToString(v) + } - if val, ok := param["inference_accelerator_overrides"]; ok { - var inferenceAcceleratorOverrides []types.EcsInferenceAcceleratorOverride - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) + if v := apiObject.TaskRoleArn; v != nil { + tfMap["task_role_arn"] = aws.ToString(v) + } - var override types.EcsInferenceAcceleratorOverride - override.DeviceName = expandString("device_name", valueParam) - override.DeviceType = expandString("device_type", valueParam) - inferenceAcceleratorOverrides = append(inferenceAcceleratorOverrides, override) - } - } - if len(inferenceAcceleratorOverrides) > 0 { - parameters.InferenceAcceleratorOverrides = inferenceAcceleratorOverrides - } - } + return tfMap +} - if val, ok := param["ecs_ephemeral_storage"]; ok { - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - if size, ok := valueParam["size_in_gib"].(int32); ok { - parameters.EphemeralStorage = &types.EcsEphemeralStorage{ - SizeInGiB: size, - } - } - } - } - } +func flattenEcsContainerOverride(apiObject types.EcsContainerOverride) map[string]interface{} { + tfMap := map[string]interface{}{} - if val, ok := param["container_overrides"]; ok { - parameters.ContainerOverrides = expandTargetECSTaskOverrideContainerOverrides(val.([]interface{})) - } + if v := apiObject.Command; v != nil { + tfMap["command"] = v } - return ¶meters -} - -func expandTargetECSTaskOverrideContainerOverrides(config []interface{}) []types.EcsContainerOverride { - if len(config) == 0 { - return nil + if v := apiObject.Cpu; v != nil { + tfMap["cpu"] = aws.ToInt32(v) } - var parameters []types.EcsContainerOverride - for _, c := range config { - param := c.(map[string]interface{}) + if v := apiObject.Environment; v != nil { + tfMap["environment"] = 
flattenEcsEnvironmentVariables(v) + } - var override types.EcsContainerOverride - override.Cpu = expandInt32("cpu", param) - override.Memory = expandInt32("memory", param) - override.MemoryReservation = expandInt32("memory_reservation", param) - override.Name = expandString("name", param) - if value, ok := param["command"]; ok { - override.Command = flex.ExpandStringValueList(value.([]interface{})) - } + if v := apiObject.EnvironmentFiles; v != nil { + tfMap["environment_file"] = flattenEcsEnvironmentFiles(v) + } - if val, ok := param["environment"]; ok { - var environment []types.EcsEnvironmentVariable - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var env types.EcsEnvironmentVariable - env.Name = expandString("name", valueParam) - env.Value = expandString("value", valueParam) - environment = append(environment, env) - } - } - if len(environment) > 0 { - override.Environment = environment - } - } + if v := apiObject.Memory; v != nil { + tfMap["memory"] = aws.ToInt32(v) + } - if val, ok := param["environment_files"]; ok { - var environment []types.EcsEnvironmentFile - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var env types.EcsEnvironmentFile - envType := expandStringValue("type", valueParam) - if envType != "" { - env.Type = types.EcsEnvironmentFileType(envType) - } - env.Value = expandString("value", valueParam) - environment = append(environment, env) - } - } - if len(environment) > 0 { - override.EnvironmentFiles = environment - } - } + if v := apiObject.MemoryReservation; v != nil { + tfMap["memory_reservation"] = aws.ToInt32(v) + } - if val, ok := param["resource_requirements"]; ok { - var resourceRequirements []types.EcsResourceRequirement - if values, ok := val.([]interface{}); ok { - for _, v := range values { - valueParam := v.(map[string]interface{}) - - var resourceRequirement types.EcsResourceRequirement - resourceRequirementType := expandStringValue("type", valueParam) - if resourceRequirementType != "" { - resourceRequirement.Type = types.EcsResourceRequirementType(resourceRequirementType) - } - resourceRequirement.Value = expandString("value", valueParam) - resourceRequirements = append(resourceRequirements, resourceRequirement) - } - } - if len(resourceRequirements) > 0 { - override.ResourceRequirements = resourceRequirements - } - } + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) + } - parameters = append(parameters, override) + if v := apiObject.ResourceRequirements; v != nil { + tfMap["resource_requirement"] = flattenEcsResourceRequirements(v) } - return parameters + return tfMap } -func expandTargetEventBridgeEventBusParameters(config []interface{}) *types.PipeTargetEventBridgeEventBusParameters { - if len(config) == 0 { +func flattenEcsContainerOverrides(apiObjects []types.EcsContainerOverride) []interface{} { + if len(apiObjects) == 0 { return nil } - var parameters types.PipeTargetEventBridgeEventBusParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.DetailType = expandString("detail_type", param) - parameters.EndpointId = expandString("endpoint_id", param) - parameters.Source = expandString("source", param) - parameters.Time = expandString("time", param) - if value, ok := param["resources"]; ok && value.(*schema.Set).Len() > 0 { - parameters.Resources = flex.ExpandStringValueSet(value.(*schema.Set)) - } + var tfList []interface{} + + for _, apiObject := 
range apiObjects {
+		tfList = append(tfList, flattenEcsContainerOverride(apiObject))
 	}
 
-	return &parameters
+	return tfList
 }
 
-func expandTargetHTTPParameters(config []interface{}) *types.PipeTargetHttpParameters {
-	if len(config) == 0 {
-		return nil
-	}
-
-	var parameters types.PipeTargetHttpParameters
-	for _, c := range config {
-		param := c.(map[string]interface{})
-		if val, ok := param["path_parameters"]; ok {
-			parameters.PathParameterValues = flex.ExpandStringValueList(val.([]interface{}))
-		}
+func flattenEcsResourceRequirement(apiObject types.EcsResourceRequirement) map[string]interface{} {
+	tfMap := map[string]interface{}{}
 
-		if val, ok := param["header"]; ok {
-			headers := map[string]string{}
-			if values, ok := val.([]interface{}); ok {
-				for _, v := range values {
-					valueParam := v.(map[string]interface{})
-
-					if key, ok := valueParam["key"].(string); ok && key != "" {
-						if value, ok := valueParam["value"].(string); ok && value != "" {
-							headers[key] = value
-						}
-					}
-				}
-			}
-			if len(headers) > 0 {
-				parameters.HeaderParameters = headers
-			}
-		}
+	if v := apiObject.Type; v != "" {
+		tfMap["type"] = v
+	}
 
-		if val, ok := param["query_string"]; ok {
-			queryStrings := map[string]string{}
-			if values, ok := val.([]interface{}); ok {
-				for _, v := range values {
-					valueParam := v.(map[string]interface{})
-
-					if key, ok := valueParam["key"].(string); ok && key != "" {
-						if value, ok := valueParam["value"].(string); ok && value != "" {
-							queryStrings[key] = value
-						}
-					}
-				}
-			}
-			if len(queryStrings) > 0 {
-				parameters.QueryStringParameters = queryStrings
-			}
-		}
+	if v := apiObject.Value; v != nil {
+		tfMap["value"] = aws.ToString(v)
 	}
-	return &parameters
+
+	return tfMap
 }
 
-func expandTargetKinesisStreamParameters(config []interface{}) *types.PipeTargetKinesisStreamParameters {
-	if len(config) == 0 {
+func flattenEcsResourceRequirements(apiObjects []types.EcsResourceRequirement) []interface{} {
+	if len(apiObjects) == 0 {
 		return nil
 	}
 
-	var parameters types.PipeTargetKinesisStreamParameters
-	for _, c := range config {
-		param := c.(map[string]interface{})
-		parameters.PartitionKey = expandString("partition_key", param)
+	var tfList []interface{}
+
+	for _, apiObject := range apiObjects {
+		tfList = append(tfList, flattenEcsResourceRequirement(apiObject))
 	}
 
-	return &parameters
+	return tfList
 }
 
-func expandTargetLambdaFunctionParameters(config []interface{}) *types.PipeTargetLambdaFunctionParameters {
-	if len(config) == 0 {
-		return nil
+func flattenEcsEnvironmentFile(apiObject types.EcsEnvironmentFile) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Type; v != "" {
+		tfMap["type"] = v
 	}
 
-	var parameters types.PipeTargetLambdaFunctionParameters
-	for _, c := range config {
-		param := c.(map[string]interface{})
-		invocationType := expandStringValue("invocation_type", param)
-		if invocationType != "" {
-			parameters.InvocationType = types.PipeTargetInvocationType(invocationType)
-		}
+	if v := apiObject.Value; v != nil {
+		tfMap["value"] = aws.ToString(v)
 	}
 
-	return &parameters
+	return tfMap
 }
 
-func expandTargetRedshiftDataParameters(config []interface{}) *types.PipeTargetRedshiftDataParameters {
-	if len(config) == 0 {
-		return nil
+func flattenEcsEnvironmentVariable(apiObject types.EcsEnvironmentVariable) map[string]interface{} {
+	tfMap := map[string]interface{}{}
+
+	if v := apiObject.Name; v != nil {
+		tfMap["name"] = aws.ToString(v)
 	}
 
-	var parameters types.PipeTargetRedshiftDataParameters
-	for _, c := range config {
-		param := c.(map[string]interface{})
- parameters.Database = expandString("database", param) - parameters.DbUser = expandString("database_user", param) - parameters.SecretManagerArn = expandString("secret_manager_arn", param) - parameters.StatementName = expandString("statement_name", param) - parameters.WithEvent = expandBool("with_event", param) - if value, ok := param["sqls"]; ok && value.(*schema.Set).Len() > 0 { - parameters.Sqls = flex.ExpandStringValueSet(value.(*schema.Set)) - } + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) } - return ¶meters + return tfMap } -func expandTargetSageMakerPipelineParameters(config []interface{}) *types.PipeTargetSageMakerPipelineParameters { - if len(config) == 0 { +func flattenEcsEnvironmentVariables(apiObjects []types.EcsEnvironmentVariable) []interface{} { + if len(apiObjects) == 0 { return nil } - var parameters types.PipeTargetSageMakerPipelineParameters - for _, c := range config { - param := c.(map[string]interface{}) - if val, ok := param["parameters"]; ok { - parametersConfig := val.([]interface{}) - var params []types.SageMakerPipelineParameter - for _, p := range parametersConfig { - pp := p.(map[string]interface{}) - name := expandString("name", pp) - value := expandString("value", pp) - if name != nil { - params = append(params, types.SageMakerPipelineParameter{ - Name: name, - Value: value, - }) - } - } - parameters.PipelineParameterList = params - } + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenEcsEnvironmentVariable(apiObject)) } - return ¶meters + return tfList } -func expandTargetSqsQueueParameters(config []interface{}) *types.PipeTargetSqsQueueParameters { - if len(config) == 0 { +func flattenEcsEnvironmentFiles(apiObjects []types.EcsEnvironmentFile) []interface{} { + if len(apiObjects) == 0 { return nil } - var parameters types.PipeTargetSqsQueueParameters - for _, c := range config { - param := c.(map[string]interface{}) - parameters.MessageDeduplicationId = expandString("message_deduplication_id", param) - parameters.MessageGroupId = expandString("message_group_id", param) + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenEcsEnvironmentFile(apiObject)) } - return ¶meters + return tfList } -func expandTargetStepFunctionStateMachineParameters(config []interface{}) *types.PipeTargetStateMachineParameters { - if len(config) == 0 { +func flattenEcsEphemeralStorage(apiObject *types.EcsEphemeralStorage) map[string]interface{} { + if apiObject == nil { return nil } - var parameters types.PipeTargetStateMachineParameters - for _, c := range config { - param := c.(map[string]interface{}) - invocationType := expandStringValue("invocation_type", param) - if invocationType != "" { - parameters.InvocationType = types.PipeTargetInvocationType(invocationType) - } + tfMap := map[string]interface{}{ + "size_in_gib": apiObject.SizeInGiB, } - return ¶meters + return tfMap } -func flattenTargetParameters(targetParameters *types.PipeTargetParameters) []map[string]interface{} { - config := make(map[string]interface{}) +func flattenEcsInferenceAcceleratorOverride(apiObject types.EcsInferenceAcceleratorOverride) map[string]interface{} { + tfMap := map[string]interface{}{} - if targetParameters.BatchJobParameters != nil { - config["batch_target"] = flattenTargetBatchJobParameters(targetParameters.BatchJobParameters) + if v := apiObject.DeviceName; v != nil { + tfMap["device_name"] = aws.ToString(v) } - if targetParameters.CloudWatchLogsParameters != nil { - 
config["cloudwatch_logs"] = flattenTargetCloudWatchLogsParameters(targetParameters.CloudWatchLogsParameters) + if v := apiObject.DeviceType; v != nil { + tfMap["device_type"] = aws.ToString(v) } - if targetParameters.EcsTaskParameters != nil { - config["ecs_task"] = flattenTargetEcsTaskParameters(targetParameters.EcsTaskParameters) - } + return tfMap +} - if targetParameters.EventBridgeEventBusParameters != nil { - config["event_bridge_event_bus"] = flattenTargetEventBridgeEventBusParameters(targetParameters.EventBridgeEventBusParameters) +func flattenEcsInferenceAcceleratorOverrides(apiObjects []types.EcsInferenceAcceleratorOverride) []interface{} { + if len(apiObjects) == 0 { + return nil } - if targetParameters.HttpParameters != nil { - config["http_parameters"] = flattenTargetHttpParameters(targetParameters.HttpParameters) - } + var tfList []interface{} - if targetParameters.InputTemplate != nil { - config["input_template"] = aws.ToString(targetParameters.InputTemplate) + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenEcsInferenceAcceleratorOverride(apiObject)) } - if targetParameters.KinesisStreamParameters != nil { - config["kinesis_stream"] = flattenTargetKinesisStreamParameters(targetParameters.KinesisStreamParameters) - } + return tfList +} - if targetParameters.LambdaFunctionParameters != nil { - config["lambda_function"] = flattenTargetLambdaFunctionParameters(targetParameters.LambdaFunctionParameters) +func flattenNetworkConfiguration(apiObject *types.NetworkConfiguration) map[string]interface{} { + if apiObject == nil { + return nil } - if targetParameters.RedshiftDataParameters != nil { - config["redshift_data"] = flattenTargetRedshiftDataParameters(targetParameters.RedshiftDataParameters) + tfMap := map[string]interface{}{} + + if v := apiObject.AwsvpcConfiguration; v != nil { + tfMap["aws_vpc_configuration"] = []interface{}{flattenAWSVPCConfiguration(v)} } - if targetParameters.SageMakerPipelineParameters != nil { - config["sagemaker_pipeline"] = flattenTargetSageMakerPipelineParameters(targetParameters.SageMakerPipelineParameters) + return tfMap +} + +func flattenAWSVPCConfiguration(apiObject *types.AwsVpcConfiguration) map[string]interface{} { + if apiObject == nil { + return nil } - if targetParameters.SqsQueueParameters != nil { - config["sqs_queue"] = flattenTargetSqsQueueParameters(targetParameters.SqsQueueParameters) + tfMap := map[string]interface{}{} + + if v := apiObject.AssignPublicIp; v != "" { + tfMap["assign_public_ip"] = v } - if targetParameters.StepFunctionStateMachineParameters != nil { - config["step_function"] = flattenTargetStepFunctionStateMachineParameters(targetParameters.StepFunctionStateMachineParameters) + if v := apiObject.SecurityGroups; v != nil { + tfMap["security_groups"] = v } - if len(config) == 0 { - return nil + if v := apiObject.Subnets; v != nil { + tfMap["subnets"] = v } - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetBatchJobParameters(parameters *types.PipeTargetBatchJobParameters) []map[string]interface{} { - config := make(map[string]interface{}) +func flattenPlacementConstraint(apiObject types.PlacementConstraint) map[string]interface{} { + tfMap := map[string]interface{}{} - if parameters.JobDefinition != nil { - config["job_definition"] = aws.ToString(parameters.JobDefinition) - } - if parameters.JobName != nil { - config["job_name"] = aws.ToString(parameters.JobName) + if v := apiObject.Expression; v != nil { + tfMap["expression"] = aws.ToString(v) } - 
var parameterValues []map[string]interface{} - for key, value := range parameters.Parameters { - p := make(map[string]interface{}) - p["key"] = key - p["value"] = value - parameterValues = append(parameterValues, p) + if v := apiObject.Type; v != "" { + tfMap["type"] = v } - config["parameters"] = parameterValues - if parameters.RetryStrategy != nil { - retryStrategyConfig := make(map[string]interface{}) - retryStrategyConfig["attempts"] = parameters.RetryStrategy.Attempts - config["retry_strategy"] = []map[string]interface{}{retryStrategyConfig} - } + return tfMap +} - if parameters.ArrayProperties != nil { - arrayPropertiesConfig := make(map[string]interface{}) - arrayPropertiesConfig["size"] = parameters.ArrayProperties.Size - config["array_properties"] = []map[string]interface{}{arrayPropertiesConfig} +func flattenPlacementConstraints(apiObjects []types.PlacementConstraint) []interface{} { + if len(apiObjects) == 0 { + return nil } - var dependsOnValues []map[string]interface{} - for _, value := range parameters.DependsOn { - dependsOn := make(map[string]interface{}) - dependsOn["job_id"] = aws.ToString(value.JobId) - dependsOn["type"] = value.Type - dependsOnValues = append(dependsOnValues, dependsOn) - } - config["depends_on"] = dependsOnValues + var tfList []interface{} - if parameters.ContainerOverrides != nil { - config["container_overrides"] = flattenTargetBatchContainerOverrides(parameters.ContainerOverrides) + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenPlacementConstraint(apiObject)) } - result := []map[string]interface{}{config} - return result + return tfList } -func flattenTargetBatchContainerOverrides(parameters *types.BatchContainerOverrides) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.Command != nil { - config["command"] = flex.FlattenStringValueSet(parameters.Command) - } - if parameters.InstanceType != nil { - config["instance_type"] = aws.ToString(parameters.InstanceType) - } +func flattenPlacementStrategy(apiObject types.PlacementStrategy) map[string]interface{} { + tfMap := map[string]interface{}{} - var environmentValues []map[string]interface{} - for _, value := range parameters.Environment { - env := make(map[string]interface{}) - env["name"] = aws.ToString(value.Name) - env["value"] = aws.ToString(value.Value) - environmentValues = append(environmentValues, env) + if v := apiObject.Field; v != nil { + tfMap["field"] = aws.ToString(v) } - config["environment"] = environmentValues - var resourceRequirementsValues []map[string]interface{} - for _, value := range parameters.ResourceRequirements { - rr := make(map[string]interface{}) - rr["type"] = value.Type - rr["value"] = aws.ToString(value.Value) - resourceRequirementsValues = append(resourceRequirementsValues, rr) + if v := apiObject.Type; v != "" { + tfMap["type"] = v } - config["resource_requirements"] = resourceRequirementsValues - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetCloudWatchLogsParameters(parameters *types.PipeTargetCloudWatchLogsParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.LogStreamName != nil { - config["log_stream_name"] = aws.ToString(parameters.LogStreamName) +func flattenPlacementStrategies(apiObjects []types.PlacementStrategy) []interface{} { + if len(apiObjects) == 0 { + return nil } - if parameters.Timestamp != nil { - config["timestamp"] = aws.ToString(parameters.Timestamp) + + var tfList []interface{} + + for _, 
apiObject := range apiObjects { + tfList = append(tfList, flattenPlacementStrategy(apiObject)) } - result := []map[string]interface{}{config} - return result + return tfList } -func flattenTargetEcsTaskParameters(parameters *types.PipeTargetEcsTaskParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.TaskDefinitionArn != nil { - config["task_definition_arn"] = aws.ToString(parameters.TaskDefinitionArn) - } - config["enable_ecs_managed_tags"] = parameters.EnableECSManagedTags - config["enable_execute_command"] = parameters.EnableExecuteCommand - if parameters.Group != nil { - config["group"] = aws.ToString(parameters.Group) - } - if parameters.LaunchType != "" { - config["launch_type"] = parameters.LaunchType - } - if parameters.PlatformVersion != nil { - config["platform_version"] = aws.ToString(parameters.PlatformVersion) - } - if parameters.PropagateTags != "" { - config["propagate_tags"] = parameters.PropagateTags - } - if parameters.ReferenceId != nil { - config["reference_id"] = aws.ToString(parameters.ReferenceId) - } - if parameters.TaskCount != nil { - config["task_count"] = aws.ToInt32(parameters.TaskCount) +func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetEventBridgeEventBusParameters) map[string]interface{} { + if apiObject == nil { + return nil } - var capacityProviderStrategyValues []map[string]interface{} - for _, value := range parameters.CapacityProviderStrategy { - strategy := make(map[string]interface{}) - strategy["capacity_provider"] = aws.ToString(value.CapacityProvider) - strategy["base"] = value.Base - strategy["weight"] = value.Weight - capacityProviderStrategyValues = append(capacityProviderStrategyValues, strategy) - } - config["capacity_provider_strategy"] = capacityProviderStrategyValues + tfMap := map[string]interface{}{} - var placementConstraintsValues []map[string]interface{} - for _, value := range parameters.PlacementConstraints { - constraint := make(map[string]interface{}) - constraint["expression"] = aws.ToString(value.Expression) - constraint["type"] = value.Type - placementConstraintsValues = append(placementConstraintsValues, constraint) + if v := apiObject.DetailType; v != nil { + tfMap["detail_type"] = aws.ToString(v) } - config["placement_constraints"] = placementConstraintsValues - var placementStrategyValues []map[string]interface{} - for _, value := range parameters.PlacementStrategy { - strategy := make(map[string]interface{}) - strategy["field"] = aws.ToString(value.Field) - strategy["type"] = value.Type - placementStrategyValues = append(placementStrategyValues, strategy) + if v := apiObject.EndpointId; v != nil { + tfMap["endpoint_id"] = aws.ToString(v) } - config["placement_strategy"] = placementStrategyValues - var tagValues []map[string]interface{} - for _, tag := range parameters.Tags { - t := make(map[string]interface{}) - t["key"] = aws.ToString(tag.Key) - t["value"] = aws.ToString(tag.Value) - tagValues = append(tagValues, t) + if v := apiObject.Resources; v != nil { + tfMap["resources"] = v } - config["tags"] = tagValues - if parameters.NetworkConfiguration != nil { - config["network_configuration"] = flattenTargetNetworkConfiguration(parameters.NetworkConfiguration) + if v := apiObject.Source; v != nil { + tfMap["source"] = aws.ToString(v) } - if parameters.Overrides != nil { - config["overrides"] = flattenTargetECSTaskOverrides(parameters.Overrides) + if v := apiObject.Time; v != nil { + tfMap["time"] = aws.ToString(v) } - result := 
[]map[string]interface{}{config} - return result + return tfMap } -func flattenTargetNetworkConfiguration(parameters *types.NetworkConfiguration) []map[string]interface{} { - config := make(map[string]interface{}) +func flattenPipeTargetHttpParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { + if apiObject == nil { + return nil + } - if parameters.AwsvpcConfiguration != nil { - awsVpcConfiguration := make(map[string]interface{}) - awsVpcConfiguration["assign_public_ip"] = parameters.AwsvpcConfiguration.AssignPublicIp + tfMap := map[string]interface{}{} - if parameters.AwsvpcConfiguration.SecurityGroups != nil { - awsVpcConfiguration["security_groups"] = flex.FlattenStringValueSet(parameters.AwsvpcConfiguration.SecurityGroups) - } + if v := apiObject.HeaderParameters; v != nil { + tfMap["header_parameters"] = v + } - if parameters.AwsvpcConfiguration.Subnets != nil { - awsVpcConfiguration["subnets"] = flex.FlattenStringValueSet(parameters.AwsvpcConfiguration.Subnets) - } + if v := apiObject.PathParameterValues; v != nil { + tfMap["path_parameter_values"] = v + } - config["aws_vpc_configuration"] = []map[string]interface{}{awsVpcConfiguration} + if v := apiObject.QueryStringParameters; v != nil { + tfMap["query_string_parameters"] = v } - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetECSTaskOverrides(parameters *types.EcsTaskOverride) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.Cpu != nil { - config["cpu"] = aws.ToString(parameters.Cpu) - } - if parameters.Memory != nil { - config["memory"] = aws.ToString(parameters.Memory) - } - if parameters.ExecutionRoleArn != nil { - config["execution_role_arn"] = aws.ToString(parameters.ExecutionRoleArn) - } - if parameters.TaskRoleArn != nil { - config["task_role_arn"] = aws.ToString(parameters.TaskRoleArn) +func flattenPipeTargetKinesisStreamParameters(apiObject *types.PipeTargetKinesisStreamParameters) map[string]interface{} { + if apiObject == nil { + return nil } - if parameters.EphemeralStorage != nil { - ecsEphemeralStorageConfig := make(map[string]interface{}) - ecsEphemeralStorageConfig["size_in_gib"] = parameters.EphemeralStorage.SizeInGiB - config["ecs_ephemeral_storage"] = []map[string]interface{}{ecsEphemeralStorageConfig} + tfMap := map[string]interface{}{} + + if v := apiObject.PartitionKey; v != nil { + tfMap["partition_key"] = aws.ToString(v) } - var inferenceAcceleratorOverridesValues []map[string]interface{} - for _, value := range parameters.InferenceAcceleratorOverrides { - override := make(map[string]interface{}) - override["device_name"] = aws.ToString(value.DeviceName) - override["device_type"] = aws.ToString(value.DeviceType) - inferenceAcceleratorOverridesValues = append(inferenceAcceleratorOverridesValues, override) + return tfMap +} + +func flattenPipeTargetLambdaFunctionParameters(apiObject *types.PipeTargetLambdaFunctionParameters) map[string]interface{} { + if apiObject == nil { + return nil } - config["inference_accelerator_overrides"] = inferenceAcceleratorOverridesValues - var overridesValues []map[string]interface{} - for _, value := range parameters.ContainerOverrides { - override := flattenTargetECSTaskOverrideContainerOverride(value) - overridesValues = append(overridesValues, override) + tfMap := map[string]interface{}{} + + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v } - config["container_overrides"] = overridesValues - result := []map[string]interface{}{config} - 
return result + return tfMap } -func flattenTargetECSTaskOverrideContainerOverride(parameters types.EcsContainerOverride) map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.Cpu != nil { - config["cpu"] = aws.ToInt32(parameters.Cpu) - } - if parameters.Memory != nil { - config["memory"] = aws.ToInt32(parameters.Memory) +func flattenPipeTargetRedshiftDataParameters(apiObject *types.PipeTargetRedshiftDataParameters) map[string]interface{} { + if apiObject == nil { + return nil } - if parameters.MemoryReservation != nil { - config["memory_reservation"] = aws.ToInt32(parameters.MemoryReservation) + + tfMap := map[string]interface{}{ + "with_event": apiObject.WithEvent, } - if parameters.Name != nil { - config["name"] = aws.ToString(parameters.Name) + + if v := apiObject.Database; v != nil { + tfMap["database"] = aws.ToString(v) } - if parameters.Command != nil { - config["command"] = flex.FlattenStringValueSet(parameters.Command) + + if v := apiObject.DbUser; v != nil { + tfMap["db_user"] = aws.ToString(v) } - var environmentValues []map[string]interface{} - for _, value := range parameters.Environment { - env := make(map[string]interface{}) - env["name"] = aws.ToString(value.Name) - env["value"] = aws.ToString(value.Value) - environmentValues = append(environmentValues, env) + if v := apiObject.SecretManagerArn; v != nil { + tfMap["secret_manager_arn"] = aws.ToString(v) } - config["environment"] = environmentValues - var environmentFileValues []map[string]interface{} - for _, value := range parameters.EnvironmentFiles { - env := make(map[string]interface{}) - env["type"] = value.Type - env["value"] = aws.ToString(value.Value) - environmentFileValues = append(environmentFileValues, env) + if v := apiObject.Sqls; v != nil { + tfMap["sqls"] = v } - config["environment_files"] = environmentFileValues - var resourceRequirementsValues []map[string]interface{} - for _, value := range parameters.ResourceRequirements { - rr := make(map[string]interface{}) - rr["type"] = value.Type - rr["value"] = aws.ToString(value.Value) - resourceRequirementsValues = append(resourceRequirementsValues, rr) + if v := apiObject.StatementName; v != nil { + tfMap["statement_name"] = aws.ToString(v) } - config["resource_requirements"] = resourceRequirementsValues - return config + return tfMap } -func flattenTargetEventBridgeEventBusParameters(parameters *types.PipeTargetEventBridgeEventBusParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.DetailType != nil { - config["detail_type"] = aws.ToString(parameters.DetailType) - } - if parameters.EndpointId != nil { - config["endpoint_id"] = aws.ToString(parameters.EndpointId) - } - if parameters.Source != nil { - config["source"] = aws.ToString(parameters.Source) - } - if parameters.Resources != nil { - config["resources"] = flex.FlattenStringValueSet(parameters.Resources) +func flattenPipeTargetSageMakerPipelineParameters(apiObject *types.PipeTargetSageMakerPipelineParameters) map[string]interface{} { + if apiObject == nil { + return nil } - if parameters.Time != nil { - config["time"] = aws.ToString(parameters.Time) + + tfMap := map[string]interface{}{} + + if v := apiObject.PipelineParameterList; v != nil { + tfMap["pipeline_parameter"] = flattenSageMakerPipelineParameters(v) } - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetHttpParameters(parameters *types.PipeTargetHttpParameters) []map[string]interface{} { - config := make(map[string]interface{}) 
+func flattenSageMakerPipelineParameter(apiObject types.SageMakerPipelineParameter) map[string]interface{} { + tfMap := map[string]interface{}{} - var headerParameters []map[string]interface{} - for key, value := range parameters.HeaderParameters { - header := make(map[string]interface{}) - header["key"] = key - header["value"] = value - headerParameters = append(headerParameters, header) + if v := apiObject.Name; v != nil { + tfMap["name"] = aws.ToString(v) } - config["header"] = headerParameters - var queryStringParameters []map[string]interface{} - for key, value := range parameters.QueryStringParameters { - queryString := make(map[string]interface{}) - queryString["key"] = key - queryString["value"] = value - queryStringParameters = append(queryStringParameters, queryString) + if v := apiObject.Value; v != nil { + tfMap["value"] = aws.ToString(v) } - config["query_string"] = queryStringParameters - config["path_parameters"] = flex.FlattenStringValueList(parameters.PathParameterValues) - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetKinesisStreamParameters(parameters *types.PipeTargetKinesisStreamParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.PartitionKey != nil { - config["partition_key"] = aws.ToString(parameters.PartitionKey) +func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelineParameter) []interface{} { + if len(apiObjects) == 0 { + return nil } - result := []map[string]interface{}{config} - return result -} - -func flattenTargetLambdaFunctionParameters(parameters *types.PipeTargetLambdaFunctionParameters) []map[string]interface{} { - config := make(map[string]interface{}) + var tfList []interface{} - if parameters.InvocationType != "" { - config["invocation_type"] = parameters.InvocationType + for _, apiObject := range apiObjects { + tfList = append(tfList, flattenSageMakerPipelineParameter(apiObject)) } - result := []map[string]interface{}{config} - return result + return tfList } -func flattenTargetRedshiftDataParameters(parameters *types.PipeTargetRedshiftDataParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.Database != nil { - config["database"] = aws.ToString(parameters.Database) - } - if parameters.DbUser != nil { - config["database_user"] = aws.ToString(parameters.DbUser) - } - if parameters.SecretManagerArn != nil { - config["secret_manager_arn"] = aws.ToString(parameters.SecretManagerArn) - } - if parameters.StatementName != nil { - config["statement_name"] = aws.ToString(parameters.StatementName) - } - config["with_event"] = parameters.WithEvent - if parameters.Sqls != nil { - config["sqls"] = flex.FlattenStringValueSet(parameters.Sqls) +func flattenPipeTargetSqsQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { + if apiObject == nil { + return nil } - result := []map[string]interface{}{config} - return result -} + tfMap := map[string]interface{}{} -func flattenTargetSageMakerPipelineParameters(parameters *types.PipeTargetSageMakerPipelineParameters) []map[string]interface{} { - config := make(map[string]interface{}) + if v := apiObject.MessageDeduplicationId; v != nil { + tfMap["message_deduplication_id"] = aws.ToString(v) + } - if len(parameters.PipelineParameterList) != 0 { - var params []map[string]interface{} - for _, param := range parameters.PipelineParameterList { - item := make(map[string]interface{}) - item["name"] = aws.ToString(param.Name) - item["value"] 
= aws.ToString(param.Value) - params = append(params, item) - } - config["parameters"] = params + if v := apiObject.MessageGroupId; v != nil { + tfMap["message_group_id"] = aws.ToString(v) } - result := []map[string]interface{}{config} - return result + return tfMap } -func flattenTargetSqsQueueParameters(parameters *types.PipeTargetSqsQueueParameters) []map[string]interface{} { - config := make(map[string]interface{}) - - if parameters.MessageDeduplicationId != nil { - config["message_deduplication_id"] = aws.ToString(parameters.MessageDeduplicationId) - } - if parameters.MessageGroupId != nil { - config["message_group_id"] = aws.ToString(parameters.MessageGroupId) +func flattenPipeTargetStateMachineParameters(apiObject *types.PipeTargetStateMachineParameters) map[string]interface{} { + if apiObject == nil { + return nil } - result := []map[string]interface{}{config} - return result -} - -func flattenTargetStepFunctionStateMachineParameters(parameters *types.PipeTargetStateMachineParameters) []map[string]interface{} { - config := make(map[string]interface{}) + tfMap := map[string]interface{}{} - if parameters.InvocationType != "" { - config["invocation_type"] = parameters.InvocationType + if v := apiObject.InvocationType; v != "" { + tfMap["invocation_type"] = v } - result := []map[string]interface{}{config} - return result + return tfMap } -*/ diff --git a/website/docs/r/pipes_pipe.html.markdown b/website/docs/r/pipes_pipe.html.markdown index f6548cd0b49..766d22abaa0 100644 --- a/website/docs/r/pipes_pipe.html.markdown +++ b/website/docs/r/pipes_pipe.html.markdown @@ -322,7 +322,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `batch_job_parameters` - (Optional) The parameters for using an AWS Batch job as a target. Detailed below. * `cloudwatch_logs_parameters` - (Optional) The parameters for using an CloudWatch Logs log stream as a target. Detailed below. -* `ecs_task` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below. +* `ecs_task_parameters` - (Optional) The parameters for using an Amazon ECS task as a target. Detailed below. * `eventbridge_event_bus_parameters` - (Optional) The parameters for using an EventBridge event bus as a target. Detailed below. * `http_parameters` - (Optional) These are custom parameter to be used when the target is an API Gateway REST APIs or EventBridge ApiDestinations. Detailed below. * `input_template` - (Optional) Valid JSON text passed to the target. In this case, nothing from the event itself is passed to the target. Maximum length of 8192 characters. @@ -359,7 +359,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable. * `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable. -###### target_parameters.batch_job_parameters.container_overrides.resource_requirements Configuration Block +###### target_parameters.batch_job_parameters.container_overrides.resource_requirement Configuration Block * `type` - (Optional) The type of resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU. * `value` - (Optional) The quantity of the specified resource to reserve for the container. [The values vary based on the type specified](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/API_BatchResourceRequirement.html). 
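+
+As a rough sketch of how these nested blocks fit together (the resource names and ARN references below are placeholders for illustration, not resources defined on this page), a pipe targeting an AWS Batch job queue might declare a container resource requirement like this:
+
+```terraform
+resource "aws_pipes_pipe" "example" {
+  # Placeholder names and ARNs for illustration only.
+  name     = "example-pipe"
+  role_arn = aws_iam_role.example.arn
+  source   = aws_sqs_queue.source.arn
+  target   = aws_batch_job_queue.target.arn
+
+  target_parameters {
+    batch_job_parameters {
+      job_definition = aws_batch_job_definition.example.arn
+      job_name       = "example-job"
+
+      container_overrides {
+        command = ["echo", "hello"]
+
+        resource_requirement {
+          type  = "VCPU"
+          value = "4"
+        }
+      }
+    }
+  }
+}
+```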
@@ -378,7 +378,7 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `log_stream_name` - (Optional) The name of the log stream. * `timestamp` - (Optional) The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. This is the JSON path to the field in the event e.g. $.detail.timestamp -#### target_parameters.ecs_task Configuration Block +#### target_parameters.ecs_task_parameters Configuration Block * `capacity_provider_strategy` - (Optional) List of capacity provider strategies to use for the task. If a capacityProviderStrategy is specified, the launchType parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the defaultCapacityProviderStrategy for the cluster is used. Detailed below. * `enable_ecs_managed_tags` - (Optional) Specifies whether to enable Amazon ECS managed tags for the task. Valid values: true, false. @@ -387,91 +387,86 @@ You can find out more about EventBridge Pipes Targets in the [User Guide](https: * `launch_type` - (Optional) Specifies the launch type on which your task is running. The launch type that you specify here must match one of the launch type (compatibilities) of the target task. The FARGATE value is supported only in the Regions where AWS Fargate with Amazon ECS is supported. Valid Values: EC2, FARGATE, EXTERNAL * `network_configuration` - (Optional) Use this structure if the Amazon ECS task uses the awsvpc network mode. This structure specifies the VPC subnets and security groups associated with the task, and whether a public IP address is to be used. This structure is required if LaunchType is FARGATE because the awsvpc mode is required for Fargate tasks. If you specify NetworkConfiguration when the target ECS task does not use the awsvpc network mode, the task fails. Detailed below. * `overrides` - (Optional) The overrides that are associated with a task. Detailed below. -* `placement_constraints` - (Optional) An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). Detailed below. +* `placement_constraint` - (Optional) An array of placement constraint objects to use for the task. You can specify up to 10 constraints per task (including constraints in the task definition and those specified at runtime). Detailed below. * `placement_strategy` - (Optional) The placement strategy objects to use for the task. You can specify a maximum of five strategy rules per task. Detailed below. * `platform_version` - (Optional) Specifies the platform version for the task. Specify only the numeric portion of the platform version, such as 1.1.0. This structure is used only if LaunchType is FARGATE. * `propagate_tags` - (Optional) Specifies whether to propagate the tags from the task definition to the task. If no value is specified, the tags are not propagated. Tags can only be propagated to the task during task creation. To add tags to a task after task creation, use the TagResource API action. Valid Values: TASK_DEFINITION * `reference_id` - (Optional) The reference ID to use for the task. Maximum length of 1,024. -* `tags` - (Optional) The metadata that you apply to the task to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Detailed below. +* `tags` - (Optional) Key-value map of tags that you apply to the task to help you categorize and organize them. 
* `task_count` - (Optional) The number of tasks to create based on TaskDefinition. The default is 1. * `task_definition_arn` - (Optional) The ARN of the task definition to use if the event target is an Amazon ECS task. -##### target_parameters.ecs_task.capacity_provider_strategy Configuration Block +##### target_parameters.ecs_task_parameters.capacity_provider_strategy Configuration Block * `base` - (Optional) The base value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a base defined. If no value is specified, the default value of 0 is used. Maximum value of 100,000. * `capacity_provider` - (Optional) The short name of the capacity provider. Maximum value of 255. * `weight` - (Optional) The weight value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The weight value is taken into consideration after the base value, if defined, is satisfied. Maximum value of 1,000. -##### target_parameters.ecs_task.network_configuration Configuration Block +##### target_parameters.ecs_task_parameters.network_configuration Configuration Block * `aws_vpc_configuration` - (Optional) Use this structure to specify the VPC subnets and security groups for the task, and whether a public IP address is to be used. This structure is relevant only for ECS tasks that use the awsvpc network mode. Detailed below. -###### target_parameters.ecs_task.network_configuration.aws_vpc_configuration Configuration Block +###### target_parameters.ecs_task_parameters.network_configuration.aws_vpc_configuration Configuration Block * `assign_public_ip` - (Optional) Specifies whether the task's elastic network interface receives a public IP address. You can specify ENABLED only when LaunchType in EcsParameters is set to FARGATE. Valid Values: ENABLED, DISABLED. * `security_groups` - (Optional) Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used. * `subnets` - (Optional) Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets. -##### target_parameters.ecs_task.overrides Configuration Block +##### target_parameters.ecs_task_parameters.overrides Configuration Block -* `container_overrides` - (Optional) One or more container overrides that are sent to a task. Detailed below. +* `container_override` - (Optional) One or more container overrides that are sent to a task. Detailed below. * `cpu` - (Optional) The cpu override for the task. -* `ecs_ephemeral_storage` - (Optional) The ephemeral storage setting override for the task. Detailed below. +* `ephemeral_storage` - (Optional) The ephemeral storage setting override for the task. Detailed below. * `execution_role_arn` - (Optional) The Amazon Resource Name (ARN) of the task execution IAM role override for the task. -* `inference_accelerator_overrides` - (Optional) List of Elastic Inference accelerator overrides for the task. Detailed below. +* `inference_accelerator_override` - (Optional) List of Elastic Inference accelerator overrides for the task. Detailed below. * `memory` - (Optional) The memory override for the task. * `task_role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. 
All containers in this task are granted the permissions that are specified in this role. -###### target_parameters.ecs_task.overrides.container_overrides Configuration Block +###### target_parameters.ecs_task_parameters.overrides.container_override Configuration Block * `command` - (Optional) List of commands to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name. * `cpu` - (Optional) The number of cpu units reserved for the container, instead of the default value from the task definition. You must also specify a container name. * `environment` - (Optional) The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the task definition. You must also specify a container name. Detailed below. -* `environment_files` - (Optional) A list of files containing the environment variables to pass to a container, instead of the value from the container definition. Detailed below. +* `environment_file` - (Optional) A list of files containing the environment variables to pass to a container, instead of the value from the container definition. Detailed below. * `memory` - (Optional) The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition. If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name. * `memory_reservation` - (Optional) The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition. You must also specify a container name. * `name` - (Optional) The name of the container that receives the override. This parameter is required if any override is specified. -* `resource_requirements` - (Optional) The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU. Detailed below. +* `resource_requirement` - (Optional) The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU. Detailed below. -###### target_parameters.ecs_task.overrides.container_overrides.environment Configuration Block +###### target_parameters.ecs_task_parameters.overrides.container_override.environment Configuration Block * `name` - (Optional) The name of the key-value pair. For environment variables, this is the name of the environment variable. * `value` - (Optional) The value of the key-value pair. For environment variables, this is the value of the environment variable. -###### target_parameters.ecs_task.overrides.container_overrides.environment_files Configuration Block +###### target_parameters.ecs_task_parameters.overrides.container_override.environment_file Configuration Block * `type` - (Optional) The file type to use. The only supported value is s3. * `value` - (Optional) The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file. -###### target_parameters.ecs_task.overrides.container_overrides.resource_requirements Configuration Block +###### target_parameters.ecs_task_parameters.overrides.container_override.resource_requirement Configuration Block * `type` - (Optional) The type of resource to assign to a container. The supported values are GPU or InferenceAccelerator. 
* `value` - (Optional) The value for the specified resource type. If the GPU type is used, the value is the number of physical GPUs the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on. If the InferenceAccelerator type is used, the value matches the deviceName for an InferenceAccelerator specified in a task definition. -###### target_parameters.ecs_task.overrides.ecs_ephemeral_storage Configuration Block +###### target_parameters.ecs_task_parameters.overrides.ephemeral_storage Configuration Block * `size_in_gib` - (Required) The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is 21 GiB and the maximum supported value is 200 GiB. -###### target_parameters.ecs_task.overrides.inference_accelerator_overrides Configuration Block +###### target_parameters.ecs_task_parameters.overrides.inference_accelerator_override Configuration Block * `device_name` - (Optional) The Elastic Inference accelerator device name to override for the task. This parameter must match a deviceName specified in the task definition. * `device_type` - (Optional) The Elastic Inference accelerator type to use. -##### target_parameters.ecs_task.placement_constraints Configuration Block +##### target_parameters.ecs_task_parameters.placement_constraint Configuration Block * `expression` - (Optional) A cluster query language expression to apply to the constraint. You cannot specify an expression if the constraint type is distinctInstance. Maximum length of 2,000. * `type` - (Optional) The type of constraint. Use distinctInstance to ensure that each task in a particular group is running on a different container instance. Use memberOf to restrict the selection to a group of valid candidates. Valid Values: distinctInstance, memberOf. -##### target_parameters.ecs_task.placement_strategy Configuration Block +##### target_parameters.ecs_task_parameters.placement_strategy Configuration Block * `field` - (Optional) The field to apply the placement strategy against. For the spread placement strategy, valid values are instanceId (or host, which has the same effect), or any platform or custom attribute that is applied to a container instance, such as attribute:ecs.availability-zone. For the binpack placement strategy, valid values are cpu and memory. For the random placement strategy, this field is not used. Maximum length of 255. * `type` - (Optional) The type of placement strategy. The random placement strategy randomly places tasks on available candidates. The spread placement strategy spreads placement across available candidates evenly based on the field parameter. The binpack strategy places tasks on available candidates that have the least available amount of the resource that is specified with the field parameter. For example, if you binpack on memory, a task is placed on the instance with the least amount of remaining memory (but still enough to run the task). Valid Values: random, spread, binpack. -##### target_parameters.ecs_task.tags Configuration Block - -* `key` - (Optional) A string you can use to assign a value. The combination of tag keys and values can help you organize and categorize your resources. Maximum length of 128. -* `value` - (Optional) The value for the specified tag key. Maximum length of 256. 
-
 #### target_parameters.eventbridge_event_bus_parameters Configuration Block
 
 * `detail_type` - (Optional) A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.

From ccd8ab62107dfcbb4f3a470eedf2ade64b02382a Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 14 Jun 2023 13:29:27 -0400
Subject: [PATCH 34/65] r/aws_pipes_pipe: Consolidate source files.

---
 internal/service/pipes/exports_test.go        |   8 ++
 internal/service/pipes/find.go                |  37 ------
 internal/service/pipes/pipe.go                | 120 +++++++++++++++++-
 internal/service/pipes/service_package_gen.go |   2 +-
 internal/service/pipes/status.go              |  40 ------
 internal/service/pipes/wait.go                |  70 ----------
 6 files changed, 127 insertions(+), 150 deletions(-)
 create mode 100644 internal/service/pipes/exports_test.go
 delete mode 100644 internal/service/pipes/find.go
 delete mode 100644 internal/service/pipes/status.go
 delete mode 100644 internal/service/pipes/wait.go

diff --git a/internal/service/pipes/exports_test.go b/internal/service/pipes/exports_test.go
new file mode 100644
index 00000000000..9ed928204bd
--- /dev/null
+++ b/internal/service/pipes/exports_test.go
@@ -0,0 +1,8 @@
+package pipes
+
+// Exports for use in tests only.
+var (
+	FindPipeByName = findPipeByName
+
+	ResourcePipe = resourcePipe
+)
diff --git a/internal/service/pipes/find.go b/internal/service/pipes/find.go
deleted file mode 100644
index 7a3e3a59539..00000000000
--- a/internal/service/pipes/find.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package pipes
-
-import (
-	"context"
-
-	"github.com/aws/aws-sdk-go-v2/aws"
-	"github.com/aws/aws-sdk-go-v2/service/pipes"
-	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
-	"github.com/hashicorp/terraform-provider-aws/internal/errs"
-	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
-)
-
-func FindPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipes.DescribePipeOutput, error) {
-	input := &pipes.DescribePipeInput{
-		Name: aws.String(name),
-	}
-
-	output, err := conn.DescribePipe(ctx, input)
-
-	if errs.IsA[*types.NotFoundException](err) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
-		}
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	if output == nil || output.Arn == nil {
-		return nil, tfresource.NewEmptyResultError(input)
-	}
-
-	return output, nil
-}
diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go
index 11030410397..36ae324853e 100644
--- a/internal/service/pipes/pipe.go
+++ b/internal/service/pipes/pipe.go
@@ -3,6 +3,7 @@ package pipes
 import (
 	"context"
+	"errors"
 	"log"
 	"regexp"
 	"time"
 
@@ -11,6 +12,7 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/pipes/types"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
@@ -25,7 +27,7 @@
 
 // @SDKResource("aws_pipes_pipe", name="Pipe")
 // @Tags(identifierAttribute="arn")
-func ResourcePipe() *schema.Resource {
+func resourcePipe() *schema.Resource {
 	return &schema.Resource{
 		CreateWithoutTimeout: resourcePipeCreate,
 		ReadWithoutTimeout:   resourcePipeRead,
@@ -170,7 +172,7 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf
 func resourcePipeRead(ctx
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	conn := meta.(*conns.AWSClient).PipesClient()
 
-	output, err := FindPipeByName(ctx, conn, d.Id())
+	output, err := findPipeByName(ctx, conn, d.Id())
 
 	if !d.IsNewResource() && tfresource.NotFound(err) {
 		log.Printf("[WARN] EventBridge Pipes Pipe (%s) not found, removing from state", d.Id())
@@ -290,3 +292,116 @@ func resourcePipeDelete(ctx context.Context, d *schema.ResourceData, meta interf
 
 	return nil
 }
+
+func findPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipes.DescribePipeOutput, error) {
+	input := &pipes.DescribePipeInput{
+		Name: aws.String(name),
+	}
+
+	output, err := conn.DescribePipe(ctx, input)
+
+	if errs.IsA[*types.NotFoundException](err) {
+		return nil, &retry.NotFoundError{
+			LastError:   err,
+			LastRequest: input,
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if output == nil || output.Arn == nil {
+		return nil, tfresource.NewEmptyResultError(input)
+	}
+
+	return output, nil
+}
+
+const (
+	pipeStatusRunning      = string(types.PipeStateRunning)
+	pipeStatusStopped      = string(types.PipeStateStopped)
+	pipeStatusCreating     = string(types.PipeStateCreating)
+	pipeStatusUpdating     = string(types.PipeStateUpdating)
+	pipeStatusDeleting     = string(types.PipeStateDeleting)
+	pipeStatusStarting     = string(types.PipeStateStarting)
+	pipeStatusStopping     = string(types.PipeStateStopping)
+	pipeStatusCreateFailed = string(types.PipeStateCreateFailed)
+	pipeStatusUpdateFailed = string(types.PipeStateUpdateFailed)
+	pipeStatusStartFailed  = string(types.PipeStateStartFailed)
+	pipeStatusStopFailed   = string(types.PipeStateStopFailed)
+)
+
+func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		output, err := findPipeByName(ctx, conn, name)
+
+		if tfresource.NotFound(err) {
+			return nil, "", nil
+		}
+
+		if err != nil {
+			return nil, "", err
+		}
+
+		return output, string(output.CurrentState), nil
+	}
+}
+
+func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) {
+	stateConf := &retry.StateChangeConf{
+		Pending:                   []string{pipeStatusCreating},
+		Target:                    []string{pipeStatusRunning, pipeStatusStopped},
+		Refresh:                   statusPipe(ctx, conn, id),
+		Timeout:                   timeout,
+		NotFoundChecks:            20,
+		ContinuousTargetOccurence: 1,
+	}
+
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+	if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok {
+		tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason)))
+
+		return output, err
+	}
+
+	return nil, err
+}
+
+func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) {
+	stateConf := &retry.StateChangeConf{
+		Pending:                   []string{pipeStatusUpdating},
+		Target:                    []string{pipeStatusRunning, pipeStatusStopped},
+		Refresh:                   statusPipe(ctx, conn, id),
+		Timeout:                   timeout,
+		NotFoundChecks:            20,
+		ContinuousTargetOccurence: 1,
+	}
+
+	outputRaw, err := stateConf.WaitForStateContext(ctx)
+	if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok {
+		tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason)))
+
+		return output, err
+	}
+
+	return nil, err
+}
+
+func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) {
+	stateConf := &retry.StateChangeConf{
+		Pending: []string{pipeStatusDeleting},
+		Target:  []string{},
+		Refresh: statusPipe(ctx,
conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) + + return output, err + } + + return nil, err +} diff --git a/internal/service/pipes/service_package_gen.go b/internal/service/pipes/service_package_gen.go index 8590ac722e9..5cf27d843e6 100644 --- a/internal/service/pipes/service_package_gen.go +++ b/internal/service/pipes/service_package_gen.go @@ -26,7 +26,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourcePipe, + Factory: resourcePipe, TypeName: "aws_pipes_pipe", Name: "Pipe", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/pipes/status.go b/internal/service/pipes/status.go deleted file mode 100644 index 4c51772f949..00000000000 --- a/internal/service/pipes/status.go +++ /dev/null @@ -1,40 +0,0 @@ -package pipes - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -const ( - pipeStatusRunning = string(types.PipeStateRunning) - pipeStatusStopped = string(types.PipeStateStopped) - pipeStatusCreating = string(types.PipeStateCreating) - pipeStatusUpdating = string(types.PipeStateUpdating) - pipeStatusDeleting = string(types.PipeStateDeleting) - pipeStatusStarting = string(types.PipeStateStarting) - pipeStatusStopping = string(types.PipeStateStopping) - pipeStatusCreateFailed = string(types.PipeStateCreateFailed) - pipeStatusUpdateFailed = string(types.PipeStateUpdateFailed) - pipeStatusStartFailed = string(types.PipeStateStartFailed) - pipeStatusStopFailed = string(types.PipeStateStopFailed) -) - -func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindPipeByName(ctx, conn, name) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.CurrentState), nil - } -} diff --git a/internal/service/pipes/wait.go b/internal/service/pipes/wait.go deleted file mode 100644 index 16f5cf03146..00000000000 --- a/internal/service/pipes/wait.go +++ /dev/null @@ -1,70 +0,0 @@ -package pipes - -import ( - "context" - "errors" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusCreating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 1, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} - -func waitPipeUpdated(ctx context.Context, conn 
*pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusUpdating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 1, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} - -func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusDeleting}, - Target: []string{}, - Refresh: statusPipe(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*pipes.DescribePipeOutput); ok { - tfresource.SetLastError(err, errors.New(aws.ToString(output.StateReason))) - - return output, err - } - - return nil, err -} From 24b6f1a29298896dc94d7cea1b3f7994641f2aca Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:42:16 -0400 Subject: [PATCH 35/65] r/aws_pipes_pipe: Remove defaults from 'target_parameters'. --- internal/service/pipes/target_parameters.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index ae2414d1b70..25aeaa14406 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -216,7 +216,6 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100000), - Default: 0, }, "capacity_provider": { Type: schema.TypeString, @@ -227,7 +226,6 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 1000), - Default: 0, }, }, }, @@ -235,12 +233,10 @@ func targetParametersSchema() *schema.Schema { "enable_ecs_managed_tags": { Type: schema.TypeBool, Optional: true, - Default: false, }, "enable_execute_command": { Type: schema.TypeBool, Optional: true, - Default: false, }, "group": { Type: schema.TypeString, @@ -494,7 +490,7 @@ func targetParametersSchema() *schema.Schema { "task_count": { Type: schema.TypeInt, Optional: true, - Default: 1, + Computed: true, }, "task_definition_arn": { Type: schema.TypeString, @@ -706,7 +702,6 @@ func targetParametersSchema() *schema.Schema { "with_event": { Type: schema.TypeBool, Optional: true, - Default: false, }, }, }, From 09188e08d8fcd2e52b118bf288b8b4352f32fa7e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:49:07 -0400 Subject: [PATCH 36/65] Revert "r/aws_pipes_pipe: Remove defaults from 'target_parameters'." This reverts commit 24b6f1a29298896dc94d7cea1b3f7994641f2aca. 
--- internal/service/pipes/target_parameters.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 25aeaa14406..ae2414d1b70 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -216,6 +216,7 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100000), + Default: 0, }, "capacity_provider": { Type: schema.TypeString, @@ -226,6 +227,7 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 1000), + Default: 0, }, }, }, @@ -233,10 +235,12 @@ func targetParametersSchema() *schema.Schema { "enable_ecs_managed_tags": { Type: schema.TypeBool, Optional: true, + Default: false, }, "enable_execute_command": { Type: schema.TypeBool, Optional: true, + Default: false, }, "group": { Type: schema.TypeString, @@ -490,7 +494,7 @@ func targetParametersSchema() *schema.Schema { "task_count": { Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 1, }, "task_definition_arn": { Type: schema.TypeString, @@ -702,6 +706,7 @@ func targetParametersSchema() *schema.Schema { "with_event": { Type: schema.TypeBool, Optional: true, + Default: false, }, }, }, From 0e90c635248b44f2d8caa11044e659a25f57701e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:54:36 -0400 Subject: [PATCH 37/65] r/aws_pipes_pipe: Use 'reflect.ValueOf().IsZero()' to detect empty results. --- internal/service/pipes/pipe.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index c6cf8bfcb17..a3e9e9d6ad4 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -4,6 +4,7 @@ import ( "context" "errors" "log" + "reflect" "regexp" "time" @@ -188,8 +189,8 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("description", output.Description) d.Set("desired_state", output.DesiredState) d.Set("enrichment", output.Enrichment) - if output.EnrichmentParameters != nil { - if err := d.Set("enrichment_parameters", []interface{}{flattenPipeEnrichmentParameters(output.EnrichmentParameters)}); err != nil { + if v := output.EnrichmentParameters; v != nil && !reflect.ValueOf(*v).IsZero() { + if err := d.Set("enrichment_parameters", []interface{}{flattenPipeEnrichmentParameters(v)}); err != nil { return diag.Errorf("setting enrichment_parameters: %s", err) } } else { @@ -199,16 +200,16 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) d.Set("role_arn", output.RoleArn) d.Set("source", output.Source) - if output.SourceParameters != nil { - if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(output.SourceParameters)}); err != nil { + if v := output.SourceParameters; v != nil && !reflect.ValueOf(*v).IsZero() { + if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(v)}); err != nil { return diag.Errorf("setting source_parameters: %s", err) } } else { d.Set("source_parameters", nil) } d.Set("target", output.Target) - if output.TargetParameters != nil { - if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(output.TargetParameters)}); err != nil { + if v := output.TargetParameters; v != nil 
&& !reflect.ValueOf(*v).IsZero() { + if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(v)}); err != nil { return diag.Errorf("setting target_parameters: %s", err) } } else { From f6daaffb1b6c689f64a51ed304ec15a643530038 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:55:28 -0400 Subject: [PATCH 38/65] r/aws_pipes_pipe: Tidy up acceptance tests. --- internal/service/pipes/pipe_test.go | 558 +++++++++++----------------- 1 file changed, 208 insertions(+), 350 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 38fc2db22d9..0fae1b0132e 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -22,11 +22,6 @@ import ( func TestAccPipesPipe_basic(t *testing.T) { ctx := acctest.Context(t) - - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" @@ -43,19 +38,20 @@ func TestAccPipesPipe_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccPipeConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), - resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), ), }, { @@ -69,10 +65,6 @@ func TestAccPipesPipe_basic(t *testing.T) { func TestAccPipesPipe_disappears(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" @@ -101,12 +93,8 @@ func TestAccPipesPipe_disappears(t *testing.T) { func TestAccPipesPipe_description(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -120,7 +108,7 @@ func TestAccPipesPipe_description(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_description(name, "Description 1"), + Config: testAccPipeConfig_description(rName, "Description 1"), Check: 
resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Description 1"), @@ -132,53 +120,34 @@ func TestAccPipesPipe_description(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_description(name, "Description 2"), + Config: testAccPipeConfig_description(rName, "Description 2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Description 2"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_description(name, ""), + Config: testAccPipeConfig_description(rName, ""), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_desiredState(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -192,7 +161,7 @@ func TestAccPipesPipe_desiredState(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_desiredState(name, "STOPPED"), + Config: testAccPipeConfig_desiredState(rName, "STOPPED"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "STOPPED"), @@ -204,53 +173,34 @@ func TestAccPipesPipe_desiredState(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_desiredState(name, "RUNNING"), + Config: testAccPipeConfig_desiredState(rName, "RUNNING"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_desiredState(name, "STOPPED"), + Config: testAccPipeConfig_desiredState(rName, "STOPPED"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "STOPPED"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_enrichment(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe 
pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -264,7 +214,7 @@ func TestAccPipesPipe_enrichment(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_enrichment(name, 0), + Config: testAccPipeConfig_enrichment(rName, 0), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.0", "arn"), @@ -276,41 +226,27 @@ func TestAccPipesPipe_enrichment(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_enrichment(name, 1), + Config: testAccPipeConfig_enrichment(rName, 1), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test.1", "arn"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "enrichment", ""), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -324,9 +260,11 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test1"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test1"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test1"]}`), ), @@ -337,36 +275,32 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_sourceParameters_filterCriteria2(name, "test1", "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria2(rName, "test1", "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "2"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test1"]}`), 
resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.1.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_sourceParameters_filterCriteria0(name), + Config: testAccPipeConfig_sourceParameters_filterCriteria0(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "0"), ), }, @@ -376,41 +310,31 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_sourceParameters_filterCriteria1(name, "test2"), + Config: testAccPipeConfig_sourceParameters_filterCriteria1(rName, "test2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.0.pattern", `{"source":["test2"]}`), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -424,7 +348,7 @@ func TestAccPipesPipe_nameGenerated(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_nameGenerated(), + Config: testAccPipeConfig_nameGenerated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceAttrNameGenerated(resourceName, "name"), @@ -442,11 +366,8 @@ func TestAccPipesPipe_nameGenerated(t 
*testing.T) { func TestAccPipesPipe_namePrefix(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -460,7 +381,7 @@ func TestAccPipesPipe_namePrefix(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_namePrefix("tf-acc-test-prefix-"), + Config: testAccPipeConfig_namePrefix(rName, "tf-acc-test-prefix-"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceAttrNameFromPrefix(resourceName, "name", "tf-acc-test-prefix-"), @@ -478,12 +399,8 @@ func TestAccPipesPipe_namePrefix(t *testing.T) { func TestAccPipesPipe_roleARN(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -497,7 +414,7 @@ func TestAccPipesPipe_roleARN(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), @@ -509,29 +426,20 @@ func TestAccPipesPipe_roleARN(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_roleARN(name), + Config: testAccPipeConfig_roleARN(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test2", "arn"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_tags(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -545,7 +453,7 @@ func TestAccPipesPipe_tags(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_tags1(name, "key1", "value1"), + Config: testAccPipeConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -558,7 +466,7 @@ func TestAccPipesPipe_tags(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_tags2(name, "key1", "value1updated", "key2", "value2"), + Config: testAccPipeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), @@ -567,35 +475,21 @@ func TestAccPipesPipe_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_tags1(name, "key2", 
"value2"), + Config: testAccPipeConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_target(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -609,7 +503,7 @@ func TestAccPipesPipe_target(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), @@ -621,29 +515,20 @@ func TestAccPipesPipe_target(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_target(name), + Config: testAccPipeConfig_target(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target2", "arn"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var pipe pipes.DescribePipeOutput - name := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" resource.ParallelTest(t, resource.TestCase{ @@ -657,7 +542,7 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.first"), + Config: testAccPipeConfig_targetParameters_inputTemplate(rName, "$.first"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.first"), @@ -669,29 +554,19 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_targetParameters_inputTemplate(name, "$.second"), + Config: testAccPipeConfig_targetParameters_inputTemplate(rName, "$.second"), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", "$.second"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccPipeConfig_basic(name), + Config: testAccPipeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.input_template"), ), }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, }, }) } @@ -762,11 +637,14 @@ func testAccPreCheck(ctx context.Context, t 
*testing.T) { } } -const testAccPipeConfig_base = ` +func testAccPipeConfig_base(rName string) string { + return fmt.Sprintf(` data "aws_caller_identity" "main" {} data "aws_partition" "main" {} resource "aws_iam_role" "test" { + name = %[1]q + assume_role_policy = jsonencode({ Version = "2012-10-17" Statement = { @@ -783,11 +661,15 @@ resource "aws_iam_role" "test" { } }) } -` +`, rName) +} -const testAccPipeConfig_base_sqsSource = ` +func testAccPipeConfig_baseSQSSource(rName string) string { + return fmt.Sprintf(` resource "aws_iam_role_policy" "source" { role = aws_iam_role.test.id + name = "%[1]s-source" + policy = jsonencode({ Version = "2012-10-17" Statement = [ @@ -806,12 +688,18 @@ resource "aws_iam_role_policy" "source" { }) } -resource "aws_sqs_queue" "source" {} -` +resource "aws_sqs_queue" "source" { + name = "%[1]s-source" +} +`, rName) +} -const testAccPipeConfig_base_sqsTarget = ` +func testAccPipeConfig_baseSQSTarget(rName string) string { + return fmt.Sprintf(` resource "aws_iam_role_policy" "target" { role = aws_iam_role.test.id + name = "%[1]s-target" + policy = jsonencode({ Version = "2012-10-17" Statement = [ @@ -828,82 +716,77 @@ resource "aws_iam_role_policy" "target" { }) } -resource "aws_sqs_queue" "target" {} -` +resource "aws_sqs_queue" "target" { + name = "%[1]s-target" +} +`, rName) +} -func testAccPipeConfig_basic(name string) string { +func testAccPipeConfig_basic(rName string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn } -`, name), - ) +`, rName)) } -func testAccPipeConfig_description(name, description string) string { +func testAccPipeConfig_description(rName, description string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn description = %[2]q } -`, name, description), - ) +`, rName, description)) } -func testAccPipeConfig_desiredState(name, state string) string { +func testAccPipeConfig_desiredState(rName, state string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - 
source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn desired_state = %[2]q } -`, name, state), - ) +`, rName, state)) } -func testAccPipeConfig_enrichment(name string, i int) string { +func testAccPipeConfig_enrichment(rName string, i int) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_cloudwatch_event_connection" "test" { name = %[1]q authorization_type = "API_KEY" + auth_parameters { api_key { key = "testKey" @@ -912,13 +795,9 @@ resource "aws_cloudwatch_event_connection" "test" { } } -locals { - name_prefix = %[1]q -} - resource "aws_cloudwatch_event_api_destination" "test" { count = 2 - name = "${local.name_prefix}-${count.index}" + name = "%[1]s-${count.index}" invocation_endpoint = "https://example.com/${count.index}" http_method = "POST" connection_arn = aws_cloudwatch_event_connection.test.arn @@ -926,32 +805,30 @@ resource "aws_cloudwatch_event_api_destination" "test" { resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn enrichment = aws_cloudwatch_event_api_destination.test[%[2]d].arn } -`, name, i), - ) +`, rName, i)) } -func testAccPipeConfig_sourceParameters_filterCriteria1(name, criteria1 string) string { +func testAccPipeConfig_sourceParameters_filterCriteria1(rName, criteria1 string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn source_parameters { filter_criteria { @@ -962,25 +839,23 @@ resource "aws_pipes_pipe" "test" { } } } - - target_parameters {} } -`, name, criteria1), - ) +`, rName, criteria1)) } -func testAccPipeConfig_sourceParameters_filterCriteria2(name, criteria1, criteria2 string) string { +func testAccPipeConfig_sourceParameters_filterCriteria2(rName, criteria1, criteria2 string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = 
aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn source_parameters { filter_criteria { @@ -997,18 +872,15 @@ resource "aws_pipes_pipe" "test" { } } } - - target_parameters {} } -`, name, criteria1, criteria2), - ) +`, rName, criteria1, criteria2)) } -func testAccPipeConfig_sourceParameters_filterCriteria0(name string) string { +func testAccPipeConfig_sourceParameters_filterCriteria0(rName string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] @@ -1020,59 +892,53 @@ resource "aws_pipes_pipe" "test" { source_parameters { filter_criteria {} } - - target_parameters {} } -`, name), - ) +`, rName)) } -func testAccPipeConfig_nameGenerated() string { +func testAccPipeConfig_nameGenerated(rName string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), ` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn } `, ) } -func testAccPipeConfig_namePrefix(namePrefix string) string { +func testAccPipeConfig_namePrefix(rName, namePrefix string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + name_prefix = %[1]q role_arn = aws_iam_role.test.arn source = aws_sqs_queue.source.arn target = aws_sqs_queue.target.arn - - source_parameters {} - target_parameters {} } -`, namePrefix), - ) +`, namePrefix)) } -func testAccPipeConfig_roleARN(name string) string { +func testAccPipeConfig_roleARN(rName string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_iam_role" "test2" { + name = "%[1]s-2" + assume_role_policy = jsonencode({ Version = "2012-10-17" Statement = { @@ -1092,6 +958,8 @@ resource "aws_iam_role" "test2" { resource "aws_iam_role_policy" "source2" { role = aws_iam_role.test2.id + name = "%[1]s-source2" + policy = jsonencode({ Version = "2012-10-17" Statement = [ @@ -1112,75 +980,68 @@ resource "aws_iam_role_policy" "source2" { resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source2, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test2.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test2.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn } 
-`, name), - ) +`, rName)) } -func testAccPipeConfig_tags1(name, tag1Key, tag1Value string) string { +func testAccPipeConfig_tags1(rName, tag1Key, tag1Value string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn tags = { %[2]q = %[3]q } } -`, name, tag1Key, tag1Value), - ) +`, rName, tag1Key, tag1Value)) } -func testAccPipeConfig_tags2(name, tag1Key, tag1Value, tag2Key, tag2Value string) string { +func testAccPipeConfig_tags2(rName, tag1Key, tag1Value, tag2Key, tag2Value string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn tags = { %[2]q = %[3]q %[4]q = %[5]q } } -`, name, tag1Key, tag1Value, tag2Key, tag2Value), - ) +`, rName, tag1Key, tag1Value, tag2Key, tag2Value)) } -func testAccPipeConfig_target(name string) string { +func testAccPipeConfig_target(rName string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_iam_role_policy" "target2" { role = aws_iam_role.test.id + name = "%[1]s-target2" + policy = jsonencode({ Version = "2012-10-17" Statement = [ @@ -1197,41 +1058,38 @@ resource "aws_iam_role_policy" "target2" { }) } -resource "aws_sqs_queue" "target2" {} +resource "aws_sqs_queue" "target2" { + name = "%[1]s-target2" +} resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target2] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target2.arn - source_parameters {} - target_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target2.arn } -`, name), - ) +`, rName)) } -func testAccPipeConfig_targetParameters_inputTemplate(name, template string) string { +func testAccPipeConfig_targetParameters_inputTemplate(rName, template string) string { return acctest.ConfigCompose( - testAccPipeConfig_base, - testAccPipeConfig_base_sqsSource, - testAccPipeConfig_base_sqsTarget, + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { depends_on = [aws_iam_role_policy.source, 
aws_iam_role_policy.target] - name = %[1]q - role_arn = aws_iam_role.test.arn - source = aws_sqs_queue.source.arn - target = aws_sqs_queue.target.arn - source_parameters {} + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn target_parameters { input_template = %[2]q } } -`, name, template), - ) +`, rName, template)) } From 23db5f7b966bbab381b4cd7c98012e25198dcff7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:56:54 -0400 Subject: [PATCH 39/65] r/aws_pipes_pipe: Remove defaults from 'source_parameters'. --- internal/service/pipes/source_parameters.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 4d8ad82f51a..54dd53d6d94 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -158,7 +158,12 @@ func sourceParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(1, 10), - Default: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "1" + }, }, "starting_position": { Type: schema.TypeString, @@ -266,7 +271,12 @@ func sourceParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(1, 10), - Default: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "0" + }, }, "starting_position": { Type: schema.TypeString, From b7c73e9a80feba5016787e0686222ad110cc60b3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 14:57:05 -0400 Subject: [PATCH 40/65] r/aws_pipes_pipe: Remove defaults from 'target_parameters'. 
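For context: the per-attribute suppressors introduced here and in the
previous commit all follow one pattern: treat an unset (empty or zero)
planned value as equal to the server-side default recorded in state.
A minimal sketch of that pattern as a factored-out helper (the helper
name and signature are illustrative only; the schema below inlines the
closure at each attribute instead):

    // suppressServerDefault suppresses the plan diff when the attribute is
    // not configured and state holds the API's default value.
    func suppressServerDefault(serverDefault string) schema.SchemaDiffSuppressFunc {
        return func(k, old, new string, d *schema.ResourceData) bool {
            // A non-empty, non-zero planned value is a real change.
            if new != "" && new != "0" {
                return false
            }
            // Otherwise suppress only while state shows the known default.
            return old == serverDefault
        }
    }

Each closure in the diff is this helper with serverDefault hard-coded,
e.g. "1" for 'task_count'.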
--- internal/service/pipes/target_parameters.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index ae2414d1b70..0efb6cbb67f 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -216,7 +216,6 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 100000), - Default: 0, }, "capacity_provider": { Type: schema.TypeString, @@ -227,7 +226,6 @@ func targetParametersSchema() *schema.Schema { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 1000), - Default: 0, }, }, }, @@ -235,12 +233,10 @@ func targetParametersSchema() *schema.Schema { "enable_ecs_managed_tags": { Type: schema.TypeBool, Optional: true, - Default: false, }, "enable_execute_command": { Type: schema.TypeBool, Optional: true, - Default: false, }, "group": { Type: schema.TypeString, @@ -494,7 +490,12 @@ func targetParametersSchema() *schema.Schema { "task_count": { Type: schema.TypeInt, Optional: true, - Default: 1, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if new != "" && new != "0" { + return false + } + return old == "1" + }, }, "task_definition_arn": { Type: schema.TypeString, @@ -706,7 +707,6 @@ func targetParametersSchema() *schema.Schema { "with_event": { Type: schema.TypeBool, Optional: true, - Default: false, }, }, }, From 7a53bdcdcbc9b87f25a527f83a3e23d79591624c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 15:06:27 -0400 Subject: [PATCH 41/65] r/aws_pipes_pipe: Some Computed attributes for 'source_parameters'. --- internal/service/pipes/source_parameters.go | 118 ++++---------------- 1 file changed, 22 insertions(+), 96 deletions(-) diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 54dd53d6d94..a161e9d1520 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -26,6 +26,7 @@ func sourceParametersSchema() *schema.Schema { "activemq_broker_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.dynamodb_stream_parameters", @@ -40,13 +41,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "credentials": { Type: schema.TypeList, @@ -65,13 +61,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "queue_name": { Type: schema.TypeString, @@ -88,6 +79,7 @@ func sourceParametersSchema() *schema.Schema { "dynamodb_stream_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", @@ -102,13 +94,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), 
- DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "dead_letter_config": { Type: schema.TypeList, @@ -127,13 +114,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "maximum_record_age_in_seconds": { Type: schema.TypeInt, @@ -157,13 +139,8 @@ func sourceParametersSchema() *schema.Schema { "parallelization_factor": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "1" - }, }, "starting_position": { Type: schema.TypeString, @@ -201,6 +178,7 @@ func sourceParametersSchema() *schema.Schema { "kinesis_stream_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", @@ -215,13 +193,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "dead_letter_config": { Type: schema.TypeList, @@ -240,13 +213,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "maximum_record_age_in_seconds": { Type: schema.TypeInt, @@ -270,13 +238,8 @@ func sourceParametersSchema() *schema.Schema { "parallelization_factor": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "starting_position": { Type: schema.TypeString, @@ -296,6 +259,7 @@ func sourceParametersSchema() *schema.Schema { "managed_streaming_kafka_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", @@ -310,13 +274,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "consumer_group_id": { Type: schema.TypeString, @@ -348,13 +307,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "starting_position": { Type: schema.TypeString, @@ -377,6 +331,7 @@ func 
sourceParametersSchema() *schema.Schema { "rabbitmq_broker_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", @@ -391,13 +346,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "credentials": { Type: schema.TypeList, @@ -416,13 +366,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "queue_name": { Type: schema.TypeString, @@ -448,6 +393,7 @@ func sourceParametersSchema() *schema.Schema { "self_managed_kafka_parameters": { Type: schema.TypeList, Optional: true, + Computed: true, MaxItems: 1, ConflictsWith: []string{ "source_parameters.0.activemq_broker_parameters", @@ -475,13 +421,8 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "100" - }, }, "consumer_group_id": { Type: schema.TypeString, @@ -524,13 +465,8 @@ func sourceParametersSchema() *schema.Schema { "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, "server_root_ca_certificate": { Type: schema.TypeString, @@ -606,24 +542,14 @@ func sourceParametersSchema() *schema.Schema { "batch_size": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(1, 10000), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "10" - }, }, "maximum_batching_window_in_seconds": { Type: schema.TypeInt, Optional: true, + Computed: true, ValidateFunc: validation.IntBetween(0, 300), - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "0" - }, }, }, }, From 3cdb6b50472d13724d1aaa5e625e2b421abed8da Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 14 Jun 2023 15:07:43 -0400 Subject: [PATCH 42/65] r/aws_pipes_pipe: Remove DiffSuppressFunc from 'target_parameters'. 
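This drops the remaining suppressor on 'task_count', leaving the
attribute plain Optional. The previous commit replaced the equivalent
suppressors in 'source_parameters' with Computed attributes; with
Computed set, an omitted attribute simply keeps whatever value the API
reports, so no suppression logic is required. Representative shape,
with batch_size shown as one example attribute from that change:

    "batch_size": {
        Type:         schema.TypeInt,
        Optional:     true,
        Computed:     true, // unset in config => keep the API-reported value
        ValidateFunc: validation.IntBetween(1, 10000),
    },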
--- internal/service/pipes/target_parameters.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 0efb6cbb67f..db542366c0e 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -490,12 +490,6 @@ func targetParametersSchema() *schema.Schema { "task_count": { Type: schema.TypeInt, Optional: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - if new != "" && new != "0" { - return false - } - return old == "1" - }, }, "task_definition_arn": { Type: schema.TypeString, From d84df7c8ee19c6dd006f1c751bcc10fc6c0d473d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 15 Jun 2023 11:10:01 -0400 Subject: [PATCH 43/65] r/aws_pipes_pipe: Original acceptance tests passing. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_' PKG=pipes ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 2 -run=TestAccPipesPipe_ -timeout 180m === RUN TestAccPipesPipe_basic === PAUSE TestAccPipesPipe_basic === RUN TestAccPipesPipe_disappears === PAUSE TestAccPipesPipe_disappears === RUN TestAccPipesPipe_description === PAUSE TestAccPipesPipe_description === RUN TestAccPipesPipe_desiredState === PAUSE TestAccPipesPipe_desiredState === RUN TestAccPipesPipe_enrichment === PAUSE TestAccPipesPipe_enrichment === RUN TestAccPipesPipe_sourceParameters_filterCriteria === PAUSE TestAccPipesPipe_sourceParameters_filterCriteria === RUN TestAccPipesPipe_nameGenerated === PAUSE TestAccPipesPipe_nameGenerated === RUN TestAccPipesPipe_namePrefix === PAUSE TestAccPipesPipe_namePrefix === RUN TestAccPipesPipe_roleARN === PAUSE TestAccPipesPipe_roleARN === RUN TestAccPipesPipe_tags === PAUSE TestAccPipesPipe_tags === RUN TestAccPipesPipe_target === PAUSE TestAccPipesPipe_target === RUN TestAccPipesPipe_targetParameters_inputTemplate === PAUSE TestAccPipesPipe_targetParameters_inputTemplate === CONT TestAccPipesPipe_basic === CONT TestAccPipesPipe_nameGenerated --- PASS: TestAccPipesPipe_basic (86.05s) === CONT TestAccPipesPipe_tags --- PASS: TestAccPipesPipe_nameGenerated (86.10s) === CONT TestAccPipesPipe_targetParameters_inputTemplate --- PASS: TestAccPipesPipe_tags (134.86s) === CONT TestAccPipesPipe_target --- PASS: TestAccPipesPipe_targetParameters_inputTemplate (168.10s) === CONT TestAccPipesPipe_roleARN --- PASS: TestAccPipesPipe_roleARN (133.22s) === CONT TestAccPipesPipe_namePrefix --- PASS: TestAccPipesPipe_target (167.52s) === CONT TestAccPipesPipe_desiredState --- PASS: TestAccPipesPipe_namePrefix (102.92s) === CONT TestAccPipesPipe_sourceParameters_filterCriteria --- PASS: TestAccPipesPipe_desiredState (192.78s) === CONT TestAccPipesPipe_enrichment --- PASS: TestAccPipesPipe_sourceParameters_filterCriteria (251.37s) === CONT TestAccPipesPipe_description --- PASS: TestAccPipesPipe_enrichment (185.26s) === CONT TestAccPipesPipe_disappears --- PASS: TestAccPipesPipe_disappears (102.22s) --- PASS: TestAccPipesPipe_description (230.86s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 907.864s --- internal/service/pipes/pipe.go | 25 ++++++++++++++------- internal/service/pipes/pipe_test.go | 7 +++--- internal/service/pipes/source_parameters.go | 8 ++++--- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 
a3e9e9d6ad4..36e30546a6e 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -228,11 +228,7 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(d.Id()), RoleArn: aws.String(d.Get("role_arn").(string)), - // Reset state in case it's a deletion. - SourceParameters: &types.UpdatePipeSourceParameters{ - FilterCriteria: &types.FilterCriteria{}, - }, - Target: aws.String(d.Get("target").(string)), + Target: aws.String(d.Get("target").(string)), // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. TargetParameters: &types.PipeTargetParameters{ InputTemplate: aws.String(""), @@ -240,9 +236,7 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf } if d.HasChange("enrichment") { - if v, ok := d.GetOk("enrichment"); ok && v.(string) != "" { - input.Enrichment = aws.String(v.(string)) - } + input.Enrichment = aws.String(d.Get("enrichment").(string)) } if v, ok := d.GetOk("enrichment_parameters"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -406,3 +400,18 @@ func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout return nil, err } + +func suppressEmptyConfigurationBlock(key string) schema.SchemaDiffSuppressFunc { + return func(k, o, n string, d *schema.ResourceData) bool { + if k != key+".#" { + return false + } + + if o == "0" && n == "1" { + v := d.Get(key).([]interface{}) + return len(v) == 0 || v[0] == nil || len(v[0].(map[string]interface{})) == 0 + } + + return false + } +} diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 0fae1b0132e..3fc52c826e5 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -48,7 +48,7 @@ func TestAccPipesPipe_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), @@ -300,8 +300,7 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "1"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.0.filter.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), ), }, { @@ -324,7 +323,7 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.filter_criteria.#", "1"), ), }, }, diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index a161e9d1520..181b70c0b6c 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -155,12 +155,12 @@ func sourceParametersSchema() *schema.Schema { Type: schema.TypeList, Optional: true, MaxItems: 1, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + DiffSuppressFunc: suppressEmptyConfigurationBlock("source_parameters.0.filter_criteria"), Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "filter": { Type: schema.TypeList, - Required: true, + Optional: true, MaxItems: 5, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -618,6 +618,8 @@ func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.Updat if v, ok := tfMap["filter_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.FilterCriteria = expandFilterCriteria(v[0].(map[string]interface{})) + } else { + apiObject.FilterCriteria = &types.FilterCriteria{} } if v, ok := tfMap["kinesis_stream_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -687,7 +689,7 @@ func expandFilters(tfList []interface{}) []types.Filter { apiObject := expandFilter(tfMap) - if apiObject == nil { + if apiObject == nil || apiObject.Pattern == nil { continue } From 02b9d32f9ef5cf585ea16bde8a4a7ec0945b1e29 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 19 Jun 2023 16:23:39 -0400 Subject: [PATCH 44/65] Use 'enum.Slice'. --- internal/service/pipes/pipe.go | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 36e30546a6e..a10be95ce0c 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -313,20 +313,6 @@ func findPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipe return output, nil } -const ( - pipeStatusRunning = string(types.PipeStateRunning) - pipeStatusStopped = string(types.PipeStateStopped) - pipeStatusCreating = string(types.PipeStateCreating) - pipeStatusUpdating = string(types.PipeStateUpdating) - pipeStatusDeleting = string(types.PipeStateDeleting) - pipeStatusStarting = string(types.PipeStateStarting) - pipeStatusStopping = string(types.PipeStateStopping) - pipeStatusCreateFailed = string(types.PipeStateCreateFailed) - pipeStatusUpdateFailed = string(types.PipeStateUpdateFailed) - pipeStatusStartFailed = string(types.PipeStateStartFailed) - pipeStatusStopFailed = string(types.PipeStateStopFailed) -) - func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findPipeByName(ctx, conn, name) @@ -345,8 +331,8 @@ func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.Stat func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusCreating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, + Pending: enum.Slice(types.PipeStateCreating), + Target: enum.Slice(types.PipeStateRunning, types.PipeStateStopped), Refresh: statusPipe(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -365,8 +351,8 @@ func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout func waitPipeUpdated(ctx context.Context, conn 
*pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusUpdating}, - Target: []string{pipeStatusRunning, pipeStatusStopped}, + Pending: enum.Slice(types.PipeStateUpdating), + Target: enum.Slice(types.PipeStateRunning, types.PipeStateStopped), Refresh: statusPipe(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -385,7 +371,7 @@ func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{pipeStatusDeleting}, + Pending: enum.Slice(types.PipeStateDeleting), Target: []string{}, Refresh: statusPipe(ctx, conn, id), Timeout: timeout, From 2c77f7ae7e7119457d7d42f1e96b7160d321ed5c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 20 Jun 2023 13:28:13 -0400 Subject: [PATCH 45/65] 'TestAccPipesPipe_target' -> 'TestAccPipesPipe_targetUpdate'. --- internal/service/pipes/pipe_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 3fc52c826e5..f57b2123259 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -485,7 +485,7 @@ func TestAccPipesPipe_tags(t *testing.T) { }) } -func TestAccPipesPipe_target(t *testing.T) { +func TestAccPipesPipe_targetUpdate(t *testing.T) { ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -514,7 +514,7 @@ func TestAccPipesPipe_target(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPipeConfig_target(rName), + Config: testAccPipeConfig_targetUpdated(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target2", "arn"), @@ -1031,7 +1031,7 @@ resource "aws_pipes_pipe" "test" { `, rName, tag1Key, tag1Value, tag2Key, tag2Value)) } -func testAccPipeConfig_target(rName string) string { +func testAccPipeConfig_targetUpdated(rName string) string { return acctest.ConfigCompose( testAccPipeConfig_base(rName), testAccPipeConfig_baseSQSSource(rName), From a8ba9f9539bad57e8c3a3c238592034b1b89f8af Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 24 Jun 2023 14:39:41 -0400 Subject: [PATCH 46/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_basicKinesis'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_basicKinesis$$\|TestAccPipesPipe_basicSQS' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... 
-v -count 1 -parallel 20 -run=TestAccPipesPipe_basicKinesis$\|TestAccPipesPipe_basicSQS -timeout 180m === RUN TestAccPipesPipe_basicSQS === PAUSE TestAccPipesPipe_basicSQS === RUN TestAccPipesPipe_basicKinesis === PAUSE TestAccPipesPipe_basicKinesis === CONT TestAccPipesPipe_basicSQS === CONT TestAccPipesPipe_basicKinesis --- PASS: TestAccPipesPipe_basicKinesis (73.34s) --- PASS: TestAccPipesPipe_basicSQS (111.07s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 116.599s --- internal/service/pipes/pipe_test.go | 232 ++++++++++++++++++++++++++-- 1 file changed, 221 insertions(+), 11 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index f57b2123259..045ccaca150 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccPipesPipe_basic(t *testing.T) { +func TestAccPipesPipe_basicSQS(t *testing.T) { ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -37,7 +37,7 @@ func TestAccPipesPipe_basic(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), @@ -49,6 +49,16 @@ func TestAccPipesPipe_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.batch_size", "10"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.maximum_batching_window_in_seconds", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), @@ -80,7 +90,7 @@ func TestAccPipesPipe_disappears(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.CheckResourceDisappears(ctx, 
acctest.Provider, tfpipes.ResourcePipe(), resourceName), @@ -134,7 +144,7 @@ func TestAccPipesPipe_description(t *testing.T) { ), }, { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), @@ -187,7 +197,7 @@ func TestAccPipesPipe_desiredState(t *testing.T) { ), }, { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), @@ -233,7 +243,7 @@ func TestAccPipesPipe_enrichment(t *testing.T) { ), }, { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "enrichment", ""), @@ -319,7 +329,7 @@ func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ), }, { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), @@ -413,7 +423,7 @@ func TestAccPipesPipe_roleARN(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), @@ -502,7 +512,7 @@ func TestAccPipesPipe_targetUpdate(t *testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sqs_queue.target", "arn"), @@ -560,7 +570,7 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { ), }, { - Config: testAccPipeConfig_basic(rName), + Config: testAccPipeConfig_basicSQS(rName), Check: resource.ComposeTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), resource.TestCheckNoResourceAttr(resourceName, "target_parameters.0.input_template"), @@ -570,6 +580,79 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { }) } +func TestAccPipesPipe_basicKinesis(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicKinesis(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + 
resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_kinesis_stream.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_record_age_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.maximum_retry_attempts", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.on_partial_batch_item_failure", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.parallelization_factor", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.starting_position", "LATEST"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.kinesis_stream_parameters.0.starting_position_timestamp", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_kinesis_stream.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, 
"target_parameters.0.kinesis_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.0.partition_key", "test"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -721,7 +804,105 @@ resource "aws_sqs_queue" "target" { `, rName) } -func testAccPipeConfig_basic(rName string) string { +func testAccPipeConfig_baseSQSDeadLetter(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "deadletter" { + role = aws_iam_role.test.id + name = "%[1]s-deadletter" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "sqs:*", + ], + Resource = [ + aws_sqs_queue.deadletter.arn, + ] + }, + ] + }) +} + +resource "aws_sqs_queue" "deadletter" { + name = "%[1]s-deadletter" +} +`, rName) +} + +func testAccPipeConfig_baseKinesisSource(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "kinesis:DescribeStream", + "kinesis:DescribeStreamSummary", + "kinesis:GetRecords", + "kinesis:GetShardIterator", + "kinesis:ListShards", + "kinesis:ListStreams", + "kinesis:SubscribeToShard", + ], + Resource = [ + aws_kinesis_stream.source.arn, + ] + }, + ] + }) +} + +resource "aws_kinesis_stream" "source" { + name = "%[1]s-source" + + stream_mode_details { + stream_mode = "ON_DEMAND" + } +} +`, rName) +} + +func testAccPipeConfig_baseKinesisTarget(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "kinesis:PutRecord", + ], + Resource = [ + aws_kinesis_stream.target.arn, + ] + }, + ] + }) +} + +resource "aws_kinesis_stream" "target" { + name = "%[1]s-target" + + stream_mode_details { + stream_mode = "ON_DEMAND" + } +} +`, rName) +} + +func testAccPipeConfig_basicSQS(rName string) string { return acctest.ConfigCompose( testAccPipeConfig_base(rName), testAccPipeConfig_baseSQSSource(rName), @@ -1092,3 +1273,32 @@ resource "aws_pipes_pipe" "test" { } `, rName, template)) } + +func testAccPipeConfig_basicKinesis(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseKinesisSource(rName), + testAccPipeConfig_baseKinesisTarget(rName), + fmt.Sprintf(` +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_kinesis_stream.source.arn + target = aws_kinesis_stream.target.arn + + 
source_parameters { + kinesis_stream_parameters { + starting_position = "LATEST" + } + } + + target_parameters { + kinesis_stream_parameters { + partition_key = "test" + } + } +} +`, rName)) +} From a516120c9ce777bb6fc21269805b795e589cece1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 24 Jun 2023 15:00:19 -0400 Subject: [PATCH 47/65] Add 'types.IsZero'. --- internal/types/zero.go | 10 ++++++++ internal/types/zero_test.go | 50 +++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 internal/types/zero.go create mode 100644 internal/types/zero_test.go diff --git a/internal/types/zero.go b/internal/types/zero.go new file mode 100644 index 00000000000..4e5bc37bdd0 --- /dev/null +++ b/internal/types/zero.go @@ -0,0 +1,10 @@ +package types + +import ( + "reflect" +) + +// IsZero returns true if `v` is `nil` or points to the zero value of `T`. +func IsZero[T any](v *T) bool { + return v == nil || reflect.ValueOf(*v).IsZero() +} diff --git a/internal/types/zero_test.go b/internal/types/zero_test.go new file mode 100644 index 00000000000..037bdc5a6a3 --- /dev/null +++ b/internal/types/zero_test.go @@ -0,0 +1,50 @@ +package types + +import ( + "testing" +) + +type AIsZero struct { + Key string + Value int +} + +func TestIsZero(t *testing.T) { + t.Parallel() + + testCases := []struct { + Name string + Ptr *AIsZero + Expected bool + }{ + { + Name: "nil pointer", + Expected: true, + }, + { + Name: "pointer to zero value", + Ptr: &AIsZero{}, + Expected: true, + }, + { + Name: "pointer to non-zero value Key", + Ptr: &AIsZero{Key: "test"}, + }, + { + Name: "pointer to non-zero value Value", + Ptr: &AIsZero{Value: 42}, + }, + } + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.Name, func(t *testing.T) { + got := IsZero(testCase.Ptr) + + if got != testCase.Expected { + t.Errorf("got %t, expected %t", got, testCase.Expected) + } + }) + } +} From 026fde6672d682e0023dcd92f3f72902cbbd4953 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 24 Jun 2023 15:02:01 -0400 Subject: [PATCH 48/65] r/aws_pipes_pipe: Use 'types.IsZero'. 
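The generic replaces the reflect-based zero checks in the read handler.
Usage, as applied in the diff below (an excerpt of the new pattern, not
an additional change):

    // Flatten an optional parameter struct only when the API returned
    // something meaningful; otherwise clear the attribute.
    if v := output.SourceParameters; !types.IsZero(v) {
        if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(v)}); err != nil {
            return diag.Errorf("setting source_parameters: %s", err)
        }
    } else {
        d.Set("source_parameters", nil)
    }

types.IsZero(v) is true for a nil *T or a pointer to T's zero value, so
a pipe with no source parameters round-trips as an absent block.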
--- internal/service/pipes/pipe.go | 34 +++--- internal/service/pipes/pipe_test.go | 173 +++++++++++++++++++++++++++- 2 files changed, 189 insertions(+), 18 deletions(-) diff --git a/internal/service/pipes/pipe.go b/internal/service/pipes/pipe.go index 2a8e13b92f9..65045acd5aa 100644 --- a/internal/service/pipes/pipe.go +++ b/internal/service/pipes/pipe.go @@ -4,13 +4,12 @@ import ( "context" "errors" "log" - "reflect" "regexp" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/aws/aws-sdk-go-v2/service/pipes/types" + awstypes "github.com/aws/aws-sdk-go-v2/service/pipes/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -22,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,8 +60,8 @@ func resourcePipe() *schema.Resource { "desired_state": { Type: schema.TypeString, Optional: true, - Default: string(types.RequestedPipeStateRunning), - ValidateDiagFunc: enum.Validate[types.RequestedPipeState](), + Default: string(awstypes.RequestedPipeStateRunning), + ValidateDiagFunc: enum.Validate[awstypes.RequestedPipeState](), }, "enrichment": { Type: schema.TypeString, @@ -127,7 +127,7 @@ func resourcePipeCreate(ctx context.Context, d *schema.ResourceData, meta interf name := create.Name(d.Get("name").(string), d.Get("name_prefix").(string)) input := &pipes.CreatePipeInput{ - DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), + DesiredState: awstypes.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(name), RoleArn: aws.String(d.Get("role_arn").(string)), Source: aws.String(d.Get("source").(string)), @@ -189,7 +189,7 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("description", output.Description) d.Set("desired_state", output.DesiredState) d.Set("enrichment", output.Enrichment) - if v := output.EnrichmentParameters; v != nil && !reflect.ValueOf(*v).IsZero() { + if v := output.EnrichmentParameters; !types.IsZero(v) { if err := d.Set("enrichment_parameters", []interface{}{flattenPipeEnrichmentParameters(v)}); err != nil { return diag.Errorf("setting enrichment_parameters: %s", err) } @@ -200,7 +200,7 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("name_prefix", create.NamePrefixFromName(aws.ToString(output.Name))) d.Set("role_arn", output.RoleArn) d.Set("source", output.Source) - if v := output.SourceParameters; v != nil && !reflect.ValueOf(*v).IsZero() { + if v := output.SourceParameters; !types.IsZero(v) { if err := d.Set("source_parameters", []interface{}{flattenPipeSourceParameters(v)}); err != nil { return diag.Errorf("setting source_parameters: %s", err) } @@ -208,7 +208,7 @@ func resourcePipeRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set("source_parameters", nil) } d.Set("target", output.Target) - if v := output.TargetParameters; v != nil && !reflect.ValueOf(*v).IsZero() { + if v := output.TargetParameters; !types.IsZero(v) { if err := d.Set("target_parameters", []interface{}{flattenPipeTargetParameters(v)}); err != nil { return 
diag.Errorf("setting target_parameters: %s", err) } @@ -225,12 +225,12 @@ func resourcePipeUpdate(ctx context.Context, d *schema.ResourceData, meta interf if d.HasChangesExcept("tags", "tags_all") { input := &pipes.UpdatePipeInput{ Description: aws.String(d.Get("description").(string)), - DesiredState: types.RequestedPipeState(d.Get("desired_state").(string)), + DesiredState: awstypes.RequestedPipeState(d.Get("desired_state").(string)), Name: aws.String(d.Id()), RoleArn: aws.String(d.Get("role_arn").(string)), Target: aws.String(d.Get("target").(string)), // Reset state in case it's a deletion, have to set the input to an empty string otherwise it doesn't get overwritten. - TargetParameters: &types.PipeTargetParameters{ + TargetParameters: &awstypes.PipeTargetParameters{ InputTemplate: aws.String(""), }, } @@ -273,7 +273,7 @@ func resourcePipeDelete(ctx context.Context, d *schema.ResourceData, meta interf Name: aws.String(d.Id()), }) - if errs.IsA[*types.NotFoundException](err) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil } @@ -295,7 +295,7 @@ func findPipeByName(ctx context.Context, conn *pipes.Client, name string) (*pipe output, err := conn.DescribePipe(ctx, input) - if errs.IsA[*types.NotFoundException](err) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -331,8 +331,8 @@ func statusPipe(ctx context.Context, conn *pipes.Client, name string) retry.Stat func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.PipeStateCreating), - Target: enum.Slice(types.PipeStateRunning, types.PipeStateStopped), + Pending: enum.Slice(awstypes.PipeStateCreating), + Target: enum.Slice(awstypes.PipeStateRunning, awstypes.PipeStateStopped), Refresh: statusPipe(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -351,8 +351,8 @@ func waitPipeCreated(ctx context.Context, conn *pipes.Client, id string, timeout func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.PipeStateUpdating), - Target: enum.Slice(types.PipeStateRunning, types.PipeStateStopped), + Pending: enum.Slice(awstypes.PipeStateUpdating), + Target: enum.Slice(awstypes.PipeStateRunning, awstypes.PipeStateStopped), Refresh: statusPipe(ctx, conn, id), Timeout: timeout, NotFoundChecks: 20, @@ -371,7 +371,7 @@ func waitPipeUpdated(ctx context.Context, conn *pipes.Client, id string, timeout func waitPipeDeleted(ctx context.Context, conn *pipes.Client, id string, timeout time.Duration) (*pipes.DescribePipeOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.PipeStateDeleting), + Pending: enum.Slice(awstypes.PipeStateDeleting), Target: []string{}, Refresh: statusPipe(ctx, conn, id), Timeout: timeout, diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 045ccaca150..fd1739134f6 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -580,7 +580,7 @@ func TestAccPipesPipe_targetParameters_inputTemplate(t *testing.T) { }) } -func TestAccPipesPipe_basicKinesis(t *testing.T) { +func TestAccPipesPipe_kinesisSourceAndTarget(t *testing.T) { ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -653,6 +653,78 @@ 
func TestAccPipesPipe_basicKinesis(t *testing.T) { }) } +func TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicDynamoDBSourceCloudWatchLogsTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_dynamodb_table.source", "stream_arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.dead_letter_config.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_record_age_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.maximum_retry_attempts", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.on_partial_batch_item_failure", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.parallelization_factor", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.0.starting_position", "LATEST"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", 
"aws_cloudwatch_log_group.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "target_parameters.0.cloudwatch_logs_parameters.0.log_stream_name", "aws_cloudwatch_log_stream.target", "name"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.0.timestamp", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -1302,3 +1374,102 @@ resource "aws_pipes_pipe" "test" { } `, rName)) } + +func testAccPipeConfig_basicDynamoDBSourceCloudWatchLogsTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:ListStreams", + ], + Resource = [ + aws_dynamodb_table.source.stream_arn, + "${aws_dynamodb_table.source.stream_arn}/*" + ] + }, + ] + }) +} + +resource "aws_dynamodb_table" "source" { + name = "%[1]s-source" + billing_mode = "PAY_PER_REQUEST" + hash_key = "PK" + range_key = "SK" + stream_enabled = true + stream_view_type = "NEW_AND_OLD_IMAGES" + + attribute { + name = "PK" + type = "S" + } + + attribute { + name = "SK" + type = "S" + } +} + +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:PutLogEvents", + ], + Resource = [ + aws_cloudwatch_log_stream.target.arn, + ] + }, + ] + }) +} + +resource "aws_cloudwatch_log_group" "target" { + name = "%[1]s-target" +} + +resource "aws_cloudwatch_log_stream" "target" { + name = "%[1]s-target" + log_group_name = aws_cloudwatch_log_group.target.name +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q 
+ role_arn = aws_iam_role.test.arn + source = aws_dynamodb_table.source.stream_arn + target = aws_cloudwatch_log_group.target.arn + + source_parameters { + dynamodb_stream_parameters { + starting_position = "LATEST" + } + } + + target_parameters { + cloudwatch_logs_parameters { + log_stream_name = aws_cloudwatch_log_stream.target.name + } + } +} +`, rName)) +} From c79ef82f4d5f351fb916de4cbac9c714cd145285 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 24 Jun 2023 15:50:57 -0400 Subject: [PATCH 49/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 20 -run=TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget -timeout 180m === RUN TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget === PAUSE TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget === CONT TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget --- PASS: TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget (75.22s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 80.505s From f00ee8b9ce8b2cfedfc2d5758f2a5caaa5c98550 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Sat, 24 Jun 2023 17:22:20 -0400 Subject: [PATCH 50/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_activeMQSourceStepFunctionTarget'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_activeMQSourceStepFunctionTarget' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 20 -run=TestAccPipesPipe_activeMQSourceStepFunctionTarget -timeout 180m === RUN TestAccPipesPipe_activeMQSourceStepFunctionTarget === PAUSE TestAccPipesPipe_activeMQSourceStepFunctionTarget === CONT TestAccPipesPipe_activeMQSourceStepFunctionTarget --- PASS: TestAccPipesPipe_activeMQSourceStepFunctionTarget (1084.58s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 1090.076s --- internal/service/pipes/pipe_test.go | 212 ++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index fd1739134f6..3026a37b9ce 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -725,6 +725,74 @@ func TestAccPipesPipe_dynamoDBSourceCloudWatchLogsTarget(t *testing.T) { }) } +func TestAccPipesPipe_activeMQSourceStepFunctionTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicActiveMQSourceStepFunctionTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + 
resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.credentials.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "source_parameters.0.activemq_broker_parameters.0.credentials.0.basic_auth"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.0.queue_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sfn_state_machine.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.0.invocation_type", "REQUEST_RESPONSE"), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -1473,3 +1541,147 @@ resource "aws_pipes_pipe" "test" { } `, rName)) } + +func testAccPipeConfig_basicActiveMQSourceStepFunctionTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "mq:DescribeBroker", + "secretsmanager:GetSecretValue", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + Resource = [ + "*" + ] + }, + ] + }) +} + +resource "aws_security_group" "source" { + name = "%[1]s-source" + + ingress { + from_port = 61617 + to_port = 61617 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = %[1]q + } +} + +resource "aws_mq_broker" "source" { + broker_name = "%[1]s-source" + engine_type = "ActiveMQ" + engine_version = "5.15.0" + host_instance_type = "mq.t2.micro" + security_groups = [aws_security_group.source.id] + authentication_strategy = "simple" + storage_type = "efs" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } + + publicly_accessible = true +} + +resource "aws_secretsmanager_secret" "source" { + name = "%[1]s-source" +} + +resource "aws_secretsmanager_secret_version" "source" { + secret_id = aws_secretsmanager_secret.source.id + secret_string = jsonencode({ username = "Test", password = "TestTest1234" }) +} + +resource "aws_iam_role" "target" { + name = "%[1]s-target" + + assume_role_policy = < Date: Sat, 24 Jun 2023 18:30:33 -0400 Subject: [PATCH 51/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_rabbitMQSourceEventBusTarget'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_rabbitMQSourceEventBusTarget' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... 
-v -count 1 -parallel 20 -run=TestAccPipesPipe_rabbitMQSourceEventBusTarget -timeout 180m === RUN TestAccPipesPipe_rabbitMQSourceEventBusTarget === PAUSE TestAccPipesPipe_rabbitMQSourceEventBusTarget === CONT TestAccPipesPipe_rabbitMQSourceEventBusTarget --- PASS: TestAccPipesPipe_rabbitMQSourceEventBusTarget (718.07s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 723.681s --- internal/service/pipes/pipe_test.go | 163 ++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 3026a37b9ce..b7364f77b15 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -793,6 +793,62 @@ func TestAccPipesPipe_activeMQSourceStepFunctionTarget(t *testing.T) { }) } +func TestAccPipesPipe_rabbitMQSourceEventBusTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicRabbitMQSourceEventBusTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_mq_broker.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.batch_size", "10"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.credentials.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.credentials.0.basic_auth"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.queue_name", "test"), + 
resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.0.virtual_host", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_cloudwatch_event_bus.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -1685,3 +1741,110 @@ resource "aws_pipes_pipe" "test" { } `, rName)) } + +func testAccPipeConfig_basicRabbitMQSourceEventBusTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "mq:DescribeBroker", + "secretsmanager:GetSecretValue", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeVpcs", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + Resource = [ + "*" + ] + }, + ] + }) +} + +resource "aws_mq_broker" "source" { + broker_name = "%[1]s-source" + engine_type = "RabbitMQ" + engine_version = "3.8.11" + host_instance_type = "mq.t3.micro" + authentication_strategy = "simple" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } + + publicly_accessible = true +} + +resource "aws_secretsmanager_secret" "source" { + name = "%[1]s-source" +} + +resource "aws_secretsmanager_secret_version" "source" { + secret_id = aws_secretsmanager_secret.source.id + secret_string = jsonencode({ username = "Test", password = "TestTest1234" }) +} + +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "events:PutEvent", + ], + Resource = [ + aws_cloudwatch_event_bus.target.arn, + ] + }, + ] + }) +} + +resource "aws_cloudwatch_event_bus" "target" { + name = "%[1]s-target" +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_mq_broker.source.arn + target = aws_cloudwatch_event_bus.target.arn + + source_parameters { + rabbitmq_broker_parameters { + queue_name = "test" + + credentials { + basic_auth = aws_secretsmanager_secret_version.source.arn + } + } + } +} +`, rName)) +} From b3bd8725fc1194a40f5ec6de3409fd679e2b9e8e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 08:45:23 -0400 Subject: [PATCH 52/65] Add 'TestAccPipesPipe_mskSourceHTTPTarget'. 
--- internal/service/pipes/pipe_test.go | 222 ++++++++++++++++++++++++++++ 1 file changed, 222 insertions(+) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index b7364f77b15..17885c4c1dd 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -849,6 +849,82 @@ func TestAccPipesPipe_rabbitMQSourceEventBusTarget(t *testing.T) { }) } +func TestAccPipesPipe_mskSourceHTTPTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicMSKSourceHTTPTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_msk_cluster.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.batch_size", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.consumer_group_id", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.credentials.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.maximum_batching_window_in_seconds", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.starting_position", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.0.topic_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "target"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.header_parameters.X-Test", "test"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.path_parameter_values.0", "p1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.0.query_string_parameters.testing", "yes"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -1630,6 +1706,8 @@ resource "aws_iam_role_policy" "source" { }, ] }) + + depends_on = [aws_mq_broker.source] } resource "aws_security_group" "source" { @@ -1774,6 +1852,8 @@ resource "aws_iam_role_policy" "source" { }, ] }) + + depends_on = [aws_mq_broker.source] } resource "aws_mq_broker" "source" { @@ -1848,3 +1928,145 @@ resource "aws_pipes_pipe" "test" { } `, rName)) } + +func testAccPipeConfig_basicMSKSourceHTTPTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + acctest.ConfigVPCWithSubnets(rName, 3), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "kafka:DescribeCluster", + "kafka:GetBootstrapBrokers", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + Resource = [ + 
"*" + ] + }, + ] + }) + + depends_on = [aws_msk_cluster.source] +} + +resource "aws_security_group" "source" { + name = "%[1]s-source" + vpc_id = aws_vpc.test.id + + tags = { + Name = "%[1]s-source" + } +} + +resource "aws_msk_cluster" "source" { + cluster_name = "%[1]s-source" + kafka_version = "2.7.1" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = aws_subnet.test[*].id + instance_type = "kafka.m5.large" + security_groups = [aws_security_group.source.id] + + storage_info { + ebs_storage_info { + volume_size = 10 + } + } + } +} + +resource "aws_api_gateway_rest_api" "target" { + name = "%[1]s-target" + + body = jsonencode({ + openapi = "3.0.1" + info = { + title = "example" + version = "1.0" + } + paths = { + "/" = { + get = { + x-amazon-apigateway-integration = { + httpMethod = "GET" + payloadFormatVersion = "1.0" + type = "HTTP_PROXY" + uri = "https://ip-ranges.amazonaws.com" + } + } + } + } + }) +} + +resource "aws_api_gateway_deployment" "target" { + rest_api_id = aws_api_gateway_rest_api.target.id + + triggers = { + redeployment = sha1(jsonencode(aws_api_gateway_rest_api.target.body)) + } + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_api_gateway_stage" "target" { + deployment_id = aws_api_gateway_deployment.target.id + rest_api_id = aws_api_gateway_rest_api.target.id + stage_name = "test" +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_msk_cluster.source.arn + target = "${aws_api_gateway_stage.target.execution_arn}/GET/*" + + source_parameters { + managed_streaming_kafka_parameters { + topic_name = "test" + } + } + + target_parameters { + http_parameters { + header_parameters = { + "X-Test" = "test" + } + + path_parameter_values = ["p1"] + + query_string_parameters = { + "testing" = "yes" + } + } + } +} +`, rName)) +} + +// TODO +// Enrichment: HTTP +// Sources: self_managed_kafka_parameters +// Targets: batch_job_parameters, ecs_task_parameters, lambda_function_parameters, redshift_data_parameters, sagemaker_pipeline_parameters From 2181c3e44640e2a10039cb9038d52bd611da5804 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 10:59:28 -0400 Subject: [PATCH 53/65] Add 'TestAccPipesPipe_selfManagedKafkaSourceLambdaFunctionTarget'. 
--- internal/service/pipes/pipe_test.go | 180 +++++++++++++++++++++++++++- 1 file changed, 178 insertions(+), 2 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 17885c4c1dd..ad492cdc415 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -925,6 +925,83 @@ func TestAccPipesPipe_mskSourceHTTPTarget(t *testing.T) { }) } +func TestAccPipesPipe_selfManagedKafkaSourceLambdaFunctionTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSelfManagedKafkaSourceLambdaFunctionTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "source", "smk://test1:9092,test2:9092"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.additional_bootstrap_servers.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.additional_bootstrap_servers.*", "testing:1234"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.batch_size", "100"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.consumer_group_id", "self-managed-test-group-id"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.credentials.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.maximum_batching_window_in_seconds", "0"), + 
resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.server_root_ca_certificate", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.starting_position", ""), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.topic_name", "test"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.0.vpc.0.subnets.#", "2"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_lambda_function.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.0.invocation_type", "REQUEST_RESPONSE"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -2066,7 +2143,106 @@ resource "aws_pipes_pipe" "test" { `, rName)) } +func testAccPipeConfig_basicSelfManagedKafkaSourceLambdaFunctionTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` +resource "aws_iam_role_policy" "source" { + role = aws_iam_role.test.id + name = "%[1]s-source" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + Resource = [ + "*" + ] + }, + ] + }) +} + +resource 
"aws_security_group" "source" { + name = "%[1]s-source" + vpc_id = aws_vpc.test.id + + tags = { + Name = "%[1]s-source" + } +} + +resource "aws_iam_role" "target" { + name = "%[1]s-target" + + assume_role_policy = < Date: Mon, 26 Jun 2023 11:56:26 -0400 Subject: [PATCH 54/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_sqsSourceRedshiftTarget'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_sqsSourceRedshiftTarget' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 20 -run=TestAccPipesPipe_sqsSourceRedshiftTarget -timeout 180m === RUN TestAccPipesPipe_sqsSourceRedshiftTarget === PAUSE TestAccPipesPipe_sqsSourceRedshiftTarget === CONT TestAccPipesPipe_sqsSourceRedshiftTarget --- PASS: TestAccPipesPipe_sqsSourceRedshiftTarget (316.69s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 323.496s --- internal/service/pipes/pipe_test.go | 117 +++++++++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index ad492cdc415..b2bcb8d4a29 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1002,6 +1002,76 @@ func TestAccPipesPipe_selfManagedKafkaSourceLambdaFunctionTarget(t *testing.T) { }) } +func TestAccPipesPipe_sqsSourceRedshiftTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceRedshiftTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + 
resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.batch_size", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.0.maximum_batching_window_in_seconds", "90"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_redshift_cluster.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.database", "db1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.db_user", "user1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.secret_manager_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.sqls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.statement_name", "SelectAll"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.0.with_event", "false"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -2243,6 +2313,51 @@ resource "aws_pipes_pipe" "test" { `, rName)) } +func testAccPipeConfig_basicSQSSourceRedshiftTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + acctest.ConfigAvailableAZsNoOptInExclude("usw2-az2"), + fmt.Sprintf(` +resource "aws_redshift_cluster" "target" { + cluster_identifier = "%[1]s-target" + availability_zone = data.aws_availability_zones.available.names[0] + database_name = "test" + master_username = "tfacctest" + master_password = "Mustbe8characters" + node_type = "dc2.large" + automated_snapshot_retention_period = 0 + allow_version_upgrade = false + skip_final_snapshot = true +} + +resource "aws_pipes_pipe" "test" 
{ + depends_on = [aws_iam_role_policy.source] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_redshift_cluster.target.arn + + source_parameters { + sqs_queue_parameters { + batch_size = 1 + maximum_batching_window_in_seconds = 90 + } + } + + target_parameters { + redshift_data_parameters { + database = "db1" + db_user = "user1" + sqls = ["SELECT * FROM table"] + statement_name = "SelectAll" + } + } +} +`, rName)) +} + // TODO // Enrichment: HTTP -// Targets: batch_job_parameters, ecs_task_parameters, redshift_data_parameters, sagemaker_pipeline_parameters +// Targets: batch_job_parameters, ecs_task_parameters, sagemaker_pipeline_parameters From 77bd4d50d5b607eb8fa0c1a0cc471a8fc04dbb8a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 13:14:25 -0400 Subject: [PATCH 55/65] Add 'TestAccPipesPipe_sqsSourceSagemakerTarget'. --- internal/service/pipes/pipe_test.go | 103 +++++++++++++++++++++++++++- 1 file changed, 102 insertions(+), 1 deletion(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index b2bcb8d4a29..4f1e884e177 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1072,6 +1072,75 @@ func TestAccPipesPipe_sqsSourceRedshiftTarget(t *testing.T) { }) } +func TestAccPipesPipe_sqsSourceSagemakerTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + acctest.Skip(t, "aws_sagemaker_pipeline resource not yet implemented") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceSagemakerTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sagemaker_pipeline.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.#", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.0.name", "p1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.0.value", "v1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.1.name", "p2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.0.pipeline_parameter.1.value", "v2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -2358,6 +2427,38 @@ resource "aws_pipes_pipe" "test" { `, rName)) } +func testAccPipeConfig_basicSQSSourceSagemakerTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + fmt.Sprintf(` +# TODO Add aws_sagemaker_pipeline resource. 
+ +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sagemaker_pipeline.target.arn + + target_parameters { + sagemaker_pipeline_parameters { + pipeline_parameter { + name = "p1" + value = "v1" + } + + pipeline_parameter { + name = "p2" + value = "v2" + } + } + } +} +`, rName)) +} + // TODO // Enrichment: HTTP -// Targets: batch_job_parameters, ecs_task_parameters, sagemaker_pipeline_parameters +// Targets: batch_job_parameters, ecs_task_parameters From cfd555472ef06fbc7c079a775231ea2eae612490 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 14:32:39 -0400 Subject: [PATCH 56/65] % make testacc TESTARGS='-run=TestAccPipesPipe_sqsSourceBatchJobTarget' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 20 -run=TestAccPipesPipe_sqsSourceBatchJobTarget -timeout 180m === RUN TestAccPipesPipe_sqsSourceBatchJobTarget === PAUSE TestAccPipesPipe_sqsSourceBatchJobTarget === CONT TestAccPipesPipe_sqsSourceBatchJobTarget --- PASS: TestAccPipesPipe_sqsSourceBatchJobTarget (164.41s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 170.141s --- internal/service/pipes/pipe_test.go | 229 +++++++++++++++++++++++++++- 1 file changed, 227 insertions(+), 2 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index 4f1e884e177..c834f8357f5 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1109,7 +1109,7 @@ func TestAccPipesPipe_sqsSourceSagemakerTarget(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), - resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrPair(resourceName, "target", "aws_sagemaker_pipeline.target", "arn"), resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), @@ -1141,6 +1141,88 @@ func TestAccPipesPipe_sqsSourceSagemakerTarget(t *testing.T) { }) } +func TestAccPipesPipe_sqsSourceBatchJobTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceBatchJobTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by 
Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_batch_job_queue.target", "arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.array_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.array_properties.0.size", "512"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.#", "3"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.0", "rm"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.1", "-fr"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.command.2", "/"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.0.name", "TMP"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.environment.0.value", "/tmp2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.instance_type", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.0.type", "GPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.container_overrides.0.resource_requirement.0.value", "1"), + 
resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.depends_on.#", "0"), + resource.TestCheckResourceAttrSet(resourceName, "target_parameters.0.batch_job_parameters.0.job_definition"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.job_name", "testing"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.parameters.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.0.retry_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -2459,6 +2541,149 @@ resource "aws_pipes_pipe" "test" { `, rName)) } +func testAccPipeConfig_basicSQSSourceBatchJobTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + acctest.ConfigVPCWithSubnets(rName, 2), + fmt.Sprintf(` +resource "aws_iam_role" "target" { + name = "%[1]s-target" + assume_role_policy = < Date: Mon, 26 Jun 2023 15:55:35 -0400 Subject: [PATCH 57/65] Add 'TestAccPipesPipe_sqsSourceECSTaskTarget'. 
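
The test is added skipped: the service currently rejects the configured
ECS task override with "ValidationException: [numeric instance is lower
than the required minimum (minimum: 1, found: 0)]". Once unskipped, it
can be exercised with the same harness as the other target tests:

% make testacc TESTARGS='-run=TestAccPipesPipe_sqsSourceECSTaskTarget' PKG=pipes
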
--- internal/service/pipes/pipe_test.go | 253 ++++++++++++++++++++++++---- 1 file changed, 224 insertions(+), 29 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index c834f8357f5..de0f0c02c99 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1223,6 +1223,112 @@ func TestAccPipesPipe_sqsSourceBatchJobTarget(t *testing.T) { }) } +func TestAccPipesPipe_sqsSourceECSTaskTarget(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + acctest.Skip(t, "ValidationException: [numeric instance is lower than the required minimum (minimum: 1, found: 0)]") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_basicSQSSourceECSTaskTarget(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), + resource.TestCheckResourceAttr(resourceName, "description", "Managed by Terraform"), + resource.TestCheckResourceAttr(resourceName, "desired_state", "RUNNING"), + resource.TestCheckResourceAttr(resourceName, "enrichment", ""), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "role_arn", "aws_iam_role.test", "arn"), + resource.TestCheckResourceAttrPair(resourceName, "source", "aws_sqs_queue.source", "arn"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.activemq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.dynamodb_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.filter_criteria.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.managed_streaming_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.rabbitmq_broker_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.self_managed_kafka_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "source_parameters.0.sqs_queue_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "target", "aws_ecs_cluster.target", "id"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.batch_job_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.cloudwatch_logs_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.capacity_provider_strategy.#", "0"), + resource.TestCheckResourceAttr(resourceName, 
"target_parameters.0.ecs_task_parameters.0.enable_ecs_managed_tags", "true"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.enable_execute_command", "false"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.group", "g1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.launch_type", "FARGATE"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.assign_public_ip", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.security_groups.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.network_configuration.0.aws_vpc_configuration.0.subnets.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.command.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.cpu", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.0.name", "TMP"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment.0.value", "/tmp2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.environment_file.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.memory", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.memory_reservation", "1024"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.name", "first"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.0.type", "GPU"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.container_override.0.resource_requirement.0.value", "2"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.cpu", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.ephemeral_storage.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"target_parameters.0.ecs_task_parameters.0.overrides.0.ephemeral_storage.0.size_in_gib", "32"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.execution_role_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.inference_accelerator_override.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.memory", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.overrides.0.task_role_arn", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_constraint.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.0.field", "cpu"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.placement_strategy.0.type", "binpack"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.platform_version", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.propagate_tags", "TASK_DEFINITION"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.reference_id", "refid"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.ecs_task_parameters.0.task_count", "1"), + resource.TestCheckResourceAttrSet(resourceName, "target_parameters.0.ecs_task_parameters.0.task_definition_arn"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.eventbridge_event_bus_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.http_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.input_template", ""), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.kinesis_stream_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.lambda_function_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.redshift_data_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sagemaker_pipeline_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.sqs_queue_parameters.#", "0"), + resource.TestCheckResourceAttr(resourceName, "target_parameters.0.step_function_state_machine_parameters.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckPipeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).PipesClient(ctx) @@ -1374,34 +1480,6 @@ resource "aws_sqs_queue" "target" { `, rName) } -func testAccPipeConfig_baseSQSDeadLetter(rName string) string { - return fmt.Sprintf(` -resource "aws_iam_role_policy" "deadletter" { - role = aws_iam_role.test.id - name = "%[1]s-deadletter" - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Action = [ - "sqs:*", - ], - Resource = [ - aws_sqs_queue.deadletter.arn, 
- ] - }, - ] - }) -} - -resource "aws_sqs_queue" "deadletter" { - name = "%[1]s-deadletter" -} -`, rName) -} - func testAccPipeConfig_baseKinesisSource(rName string) string { return fmt.Sprintf(` resource "aws_iam_role_policy" "source" { @@ -2684,6 +2762,123 @@ resource "aws_pipes_pipe" "test" { `, rName)) } +func testAccPipeConfig_basicSQSSourceECSTaskTarget(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + acctest.ConfigVPCWithSubnets(rName, 1), + fmt.Sprintf(` +resource "aws_iam_role_policy" "target" { + role = aws_iam_role.test.id + name = "%[1]s-target" + + policy = < Date: Mon, 26 Jun 2023 17:06:07 -0400 Subject: [PATCH 58/65] r/aws_pipes_pipe: Add 'TestAccPipesPipe_enrichmentParameters'. Acceptance test output: % make testacc TESTARGS='-run=TestAccPipesPipe_enrichmentParameters' PKG=pipes ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/pipes/... -v -count 1 -parallel 20 -run=TestAccPipesPipe_enrichmentParameters -timeout 180m === RUN TestAccPipesPipe_enrichmentParameters === PAUSE TestAccPipesPipe_enrichmentParameters === CONT TestAccPipesPipe_enrichmentParameters --- PASS: TestAccPipesPipe_enrichmentParameters (140.48s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/pipes 146.365s --- .../service/pipes/enrichment_parameters.go | 1 + internal/service/pipes/pipe_test.go | 159 +++++++++++++++++- internal/service/pipes/target_parameters.go | 1 + 3 files changed, 158 insertions(+), 3 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go index 292cf825799..e0cb2496737 100644 --- a/internal/service/pipes/enrichment_parameters.go +++ b/internal/service/pipes/enrichment_parameters.go @@ -29,6 +29,7 @@ func enrichmentParametersSchema() *schema.Schema { "path_parameter_values": { Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, }, diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index de0f0c02c99..b17b88e4edb 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -253,6 +253,61 @@ func TestAccPipesPipe_enrichment(t *testing.T) { }) } +func TestAccPipesPipe_enrichmentParameters(t *testing.T) { + ctx := acctest.Context(t) + var pipe pipes.DescribePipeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pipes_pipe.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PipesEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PipesEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPipeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPipeConfig_enrichmentParameters(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, 
"enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-1", "Val1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.0", "p1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.q1", "abc"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPipeConfig_enrichmentParametersUpdated(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckPipeExists(ctx, resourceName, &pipe), + resource.TestCheckResourceAttrPair(resourceName, "enrichment", "aws_cloudwatch_event_api_destination.test", "arn"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.%", "2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-1", "Val1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.header_parameters.X-Test-2", "Val2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.#", "1"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.path_parameter_values.0", "p2"), + resource.TestCheckResourceAttr(resourceName, "enrichment_parameters.0.http_parameters.0.query_string_parameters.%", "0"), + ), + }, + }, + }) +} + func TestAccPipesPipe_sourceParameters_filterCriteria(t *testing.T) { ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput @@ -1644,6 +1699,107 @@ resource "aws_pipes_pipe" "test" { `, rName, i)) } +func testAccPipeConfig_enrichmentParameters(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_cloudwatch_event_connection" "test" { + name = %[1]q + authorization_type = "API_KEY" + + auth_parameters { + api_key { + key = "testKey" + value = "testValue" + } + } +} + +resource "aws_cloudwatch_event_api_destination" "test" { + name = %[1]q + invocation_endpoint = "https://example.com/" + http_method = "POST" + connection_arn = aws_cloudwatch_event_connection.test.arn +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.test.arn + + enrichment_parameters { + http_parameters { + header_parameters = { + "X-Test-1" = "Val1" + } + + path_parameter_values = ["p1"] + + query_string_parameters = { + "q1" = "abc" + } + } + } +} +`, rName)) +} + +func testAccPipeConfig_enrichmentParametersUpdated(rName string) string { + return acctest.ConfigCompose( + testAccPipeConfig_base(rName), + testAccPipeConfig_baseSQSSource(rName), + testAccPipeConfig_baseSQSTarget(rName), + fmt.Sprintf(` +resource "aws_cloudwatch_event_connection" "test" { + name = 
%[1]q + authorization_type = "API_KEY" + + auth_parameters { + api_key { + key = "testKey" + value = "testValue" + } + } +} + +resource "aws_cloudwatch_event_api_destination" "test" { + name = %[1]q + invocation_endpoint = "https://example.com/" + http_method = "POST" + connection_arn = aws_cloudwatch_event_connection.test.arn +} + +resource "aws_pipes_pipe" "test" { + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + + name = %[1]q + role_arn = aws_iam_role.test.arn + source = aws_sqs_queue.source.arn + target = aws_sqs_queue.target.arn + + enrichment = aws_cloudwatch_event_api_destination.test.arn + + enrichment_parameters { + http_parameters { + header_parameters = { + "X-Test-1" = "Val1" + "X-Test-2" = "Val2" + } + + path_parameter_values = ["p2"] + } + } +} +`, rName)) +} + func testAccPipeConfig_sourceParameters_filterCriteria1(rName, criteria1 string) string { return acctest.ConfigCompose( testAccPipeConfig_base(rName), @@ -2879,6 +3035,3 @@ resource "aws_pipes_pipe" "test" { } `, rName)) } - -// TODO -// Enrichment: HTTP diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index db542366c0e..6b9fc9fa571 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -583,6 +583,7 @@ func targetParametersSchema() *schema.Schema { "path_parameter_values": { Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Schema{ Type: schema.TypeString, }, From 622cb2abd4676e7d1016fa0fc7f08e2a89f2eddc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:17:05 -0400 Subject: [PATCH 59/65] Fix terrafmt errors in acceptance test configurations. --- internal/service/pipes/pipe_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index b17b88e4edb..a1598a4c8f1 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1904,7 +1904,7 @@ func testAccPipeConfig_namePrefix(rName, namePrefix string) string { testAccPipeConfig_baseSQSTarget(rName), fmt.Sprintf(` resource "aws_pipes_pipe" "test" { - depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] + depends_on = [aws_iam_role_policy.source, aws_iam_role_policy.target] name_prefix = %[1]q role_arn = aws_iam_role.test.arn @@ -2437,7 +2437,7 @@ resource "aws_iam_role_policy" "target" { } resource "aws_cloudwatch_event_bus" "target" { - name = "%[1]s-target" + name = "%[1]s-target" } resource "aws_pipes_pipe" "test" { @@ -2901,7 +2901,7 @@ resource "aws_pipes_pipe" "test" { } resource_requirement { - type = "GPU" + type = "GPU" value = "1" } } @@ -2910,7 +2910,7 @@ resource "aws_pipes_pipe" "test" { job_name = "testing" parameters = { - "Key1" = "Value1" + "Key1" = "Value1" } } } From db237c6ca32ac16659f9c262c63609444389626b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:17:50 -0400 Subject: [PATCH 60/65] Fix markdown-lint 'MD012/no-multiple-blanks Multiple consecutive blank lines [Expected: 1; Actual: 2]'. 
--- internal/service/pipes/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/pipes/README.md b/internal/service/pipes/README.md index d9709ed6b24..5eff3285fc5 100644 --- a/internal/service/pipes/README.md +++ b/internal/service/pipes/README.md @@ -3,4 +3,3 @@ * AWS Provider: [Contribution Guide](https://hashicorp.github.io/terraform-provider-aws/#contribute) * Service User Guide: [Amazon EventBridge Pipes](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html) * Service API Guide: [Welcome](https://docs.aws.amazon.com/eventbridge/latest/pipes-reference/Welcome.html) - From 4d08912e299a091f840acff74b3195a40e6d7c22 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:28:33 -0400 Subject: [PATCH 61/65] Fix semgrep errors. --- .../service/pipes/enrichment_parameters.go | 8 +- internal/service/pipes/pipe_test.go | 6 +- internal/service/pipes/source_parameters.go | 22 +-- internal/service/pipes/target_parameters.go | 128 +++++++++--------- 4 files changed, 82 insertions(+), 82 deletions(-) diff --git a/internal/service/pipes/enrichment_parameters.go b/internal/service/pipes/enrichment_parameters.go index e0cb2496737..a97da43288a 100644 --- a/internal/service/pipes/enrichment_parameters.go +++ b/internal/service/pipes/enrichment_parameters.go @@ -60,7 +60,7 @@ func expandPipeEnrichmentParameters(tfMap map[string]interface{}) *types.PipeEnr apiObject := &types.PipeEnrichmentParameters{} if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.HttpParameters = expandPipeEnrichmentHttpParameters(v[0].(map[string]interface{})) + apiObject.HttpParameters = expandPipeEnrichmentHTTPParameters(v[0].(map[string]interface{})) } if v, ok := tfMap["input_template"].(string); ok && v != "" { @@ -70,7 +70,7 @@ func expandPipeEnrichmentParameters(tfMap map[string]interface{}) *types.PipeEnr return apiObject } -func expandPipeEnrichmentHttpParameters(tfMap map[string]interface{}) *types.PipeEnrichmentHttpParameters { +func expandPipeEnrichmentHTTPParameters(tfMap map[string]interface{}) *types.PipeEnrichmentHttpParameters { if tfMap == nil { return nil } @@ -100,7 +100,7 @@ func flattenPipeEnrichmentParameters(apiObject *types.PipeEnrichmentParameters) tfMap := map[string]interface{}{} if v := apiObject.HttpParameters; v != nil { - tfMap["http_parameters"] = []interface{}{flattenPipeEnrichmentHttpParameters(v)} + tfMap["http_parameters"] = []interface{}{flattenPipeEnrichmentHTTPParameters(v)} } if v := apiObject.InputTemplate; v != nil { @@ -110,7 +110,7 @@ func flattenPipeEnrichmentParameters(apiObject *types.PipeEnrichmentParameters) return tfMap } -func flattenPipeEnrichmentHttpParameters(apiObject *types.PipeEnrichmentHttpParameters) map[string]interface{} { +func flattenPipeEnrichmentHTTPParameters(apiObject *types.PipeEnrichmentHttpParameters) map[string]interface{} { if apiObject == nil { return nil } diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index a1598a4c8f1..df0b1e022a9 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -1127,7 +1127,7 @@ func TestAccPipesPipe_sqsSourceRedshiftTarget(t *testing.T) { }) } -func TestAccPipesPipe_sqsSourceSagemakerTarget(t *testing.T) { +func TestAccPipesPipe_SourceSageMakerTarget(t *testing.T) { ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1146,7 +1146,7 @@ func TestAccPipesPipe_sqsSourceSagemakerTarget(t 
*testing.T) { CheckDestroy: testAccCheckPipeDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPipeConfig_basicSQSSourceSagemakerTarget(rName), + Config: testAccPipeConfig_basicSQSSourceSageMakerTarget(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckPipeExists(ctx, resourceName, &pipe), acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "pipes", regexp.MustCompile(regexp.QuoteMeta(`pipe/`+rName))), @@ -2743,7 +2743,7 @@ resource "aws_pipes_pipe" "test" { `, rName)) } -func testAccPipeConfig_basicSQSSourceSagemakerTarget(rName string) string { +func testAccPipeConfig_basicSQSSourceSageMakerTarget(rName string) string { return acctest.ConfigCompose( testAccPipeConfig_base(rName), testAccPipeConfig_baseSQSSource(rName), diff --git a/internal/service/pipes/source_parameters.go b/internal/service/pipes/source_parameters.go index 181b70c0b6c..a02b3683972 100644 --- a/internal/service/pipes/source_parameters.go +++ b/internal/service/pipes/source_parameters.go @@ -595,7 +595,7 @@ func expandPipeSourceParameters(tfMap map[string]interface{}) *types.PipeSourceP } if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.SqsQueueParameters = expandPipeSourceSqsQueueParameters(v[0].(map[string]interface{})) + apiObject.SqsQueueParameters = expandPipeSourceSQSQueueParameters(v[0].(map[string]interface{})) } return apiObject @@ -639,7 +639,7 @@ func expandUpdatePipeSourceParameters(tfMap map[string]interface{}) *types.Updat } if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.SqsQueueParameters = expandUpdatePipeSourceSqsQueueParameters(v[0].(map[string]interface{})) + apiObject.SqsQueueParameters = expandUpdatePipeSourceSQSQueueParameters(v[0].(map[string]interface{})) } return apiObject @@ -1119,7 +1119,7 @@ func expandPipeSourceSelfManagedKafkaParameters(tfMap map[string]interface{}) *t } if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVpc(v[0].(map[string]interface{})) + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVPC(v[0].(map[string]interface{})) } return apiObject @@ -1149,7 +1149,7 @@ func expandUpdatePipeSourceSelfManagedKafkaParameters(tfMap map[string]interface } if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVpc(v[0].(map[string]interface{})) + apiObject.Vpc = expandSelfManagedKafkaAccessConfigurationVPC(v[0].(map[string]interface{})) } else { apiObject.Vpc = &types.SelfManagedKafkaAccessConfigurationVpc{} } @@ -1197,7 +1197,7 @@ func expandSelfManagedKafkaAccessConfigurationCredentials(tfMap map[string]inter return nil } -func expandSelfManagedKafkaAccessConfigurationVpc(tfMap map[string]interface{}) *types.SelfManagedKafkaAccessConfigurationVpc { +func expandSelfManagedKafkaAccessConfigurationVPC(tfMap map[string]interface{}) *types.SelfManagedKafkaAccessConfigurationVpc { if tfMap == nil { return nil } @@ -1215,7 +1215,7 @@ func expandSelfManagedKafkaAccessConfigurationVpc(tfMap map[string]interface{}) return apiObject } -func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { +func expandPipeSourceSQSQueueParameters(tfMap map[string]interface{}) *types.PipeSourceSqsQueueParameters { if tfMap == nil { return nil } @@ -1233,7 +1233,7 @@ func expandPipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.Pip 
return apiObject } -func expandUpdatePipeSourceSqsQueueParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSqsQueueParameters { +func expandUpdatePipeSourceSQSQueueParameters(tfMap map[string]interface{}) *types.UpdatePipeSourceSqsQueueParameters { if tfMap == nil { return nil } @@ -1287,7 +1287,7 @@ func flattenPipeSourceParameters(apiObject *types.PipeSourceParameters) map[stri } if v := apiObject.SqsQueueParameters; v != nil { - tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSqsQueueParameters(v)} + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeSourceSQSQueueParameters(v)} } return tfMap @@ -1587,7 +1587,7 @@ func flattenPipeSourceSelfManagedKafkaParameters(apiObject *types.PipeSourceSelf } if v := apiObject.Vpc; v != nil { - tfMap["vpc"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationVpc(v)} + tfMap["vpc"] = []interface{}{flattenSelfManagedKafkaAccessConfigurationVPC(v)} } return tfMap @@ -1627,7 +1627,7 @@ func flattenSelfManagedKafkaAccessConfigurationCredentials(apiObject types.SelfM return tfMap } -func flattenSelfManagedKafkaAccessConfigurationVpc(apiObject *types.SelfManagedKafkaAccessConfigurationVpc) map[string]interface{} { +func flattenSelfManagedKafkaAccessConfigurationVPC(apiObject *types.SelfManagedKafkaAccessConfigurationVpc) map[string]interface{} { if apiObject == nil { return nil } @@ -1645,7 +1645,7 @@ func flattenSelfManagedKafkaAccessConfigurationVpc(apiObject *types.SelfManagedK return tfMap } -func flattenPipeSourceSqsQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { +func flattenPipeSourceSQSQueueParameters(apiObject *types.PipeSourceSqsQueueParameters) map[string]interface{} { if apiObject == nil { return nil } diff --git a/internal/service/pipes/target_parameters.go b/internal/service/pipes/target_parameters.go index 6b9fc9fa571..ab6a9369d28 100644 --- a/internal/service/pipes/target_parameters.go +++ b/internal/service/pipes/target_parameters.go @@ -827,7 +827,7 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP } if v, ok := tfMap["ecs_task_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.EcsTaskParameters = expandPipeTargetEcsTaskParameters(v[0].(map[string]interface{})) + apiObject.EcsTaskParameters = expandPipeTargetECSTaskParameters(v[0].(map[string]interface{})) } if v, ok := tfMap["eventbridge_event_bus_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -835,7 +835,7 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP } if v, ok := tfMap["http_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.HttpParameters = expandPipeTargetHttpParameters(v[0].(map[string]interface{})) + apiObject.HttpParameters = expandPipeTargetHTTPParameters(v[0].(map[string]interface{})) } if v, ok := tfMap["input_template"].(string); ok && v != "" { @@ -859,7 +859,7 @@ func expandPipeTargetParameters(tfMap map[string]interface{}) *types.PipeTargetP } if v, ok := tfMap["sqs_queue_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.SqsQueueParameters = expandPipeTargetSqsQueueParameters(v[0].(map[string]interface{})) + apiObject.SqsQueueParameters = expandPipeTargetSQSQueueParameters(v[0].(map[string]interface{})) } if v, ok := tfMap["step_function_state_machine_parameters"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -1111,7 +1111,7 @@ func expandPipeTargetCloudWatchLogsParameters(tfMap map[string]interface{}) *typ return 
apiObject } -func expandPipeTargetEcsTaskParameters(tfMap map[string]interface{}) *types.PipeTargetEcsTaskParameters { +func expandPipeTargetECSTaskParameters(tfMap map[string]interface{}) *types.PipeTargetEcsTaskParameters { if tfMap == nil { return nil } @@ -1143,7 +1143,7 @@ func expandPipeTargetEcsTaskParameters(tfMap map[string]interface{}) *types.Pipe } if v, ok := tfMap["overrides"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.Overrides = expandEcsTaskOverride(v[0].(map[string]interface{})) + apiObject.Overrides = expandECSTaskOverride(v[0].(map[string]interface{})) } if v, ok := tfMap["placement_constraint"].([]interface{}); ok && len(v) > 0 { @@ -1239,13 +1239,13 @@ func expandNetworkConfiguration(tfMap map[string]interface{}) *types.NetworkConf apiObject := &types.NetworkConfiguration{} if v, ok := tfMap["aws_vpc_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.AwsvpcConfiguration = expandAWSVPCConfiguration(v[0].(map[string]interface{})) + apiObject.AwsvpcConfiguration = expandVPCConfiguration(v[0].(map[string]interface{})) } return apiObject } -func expandAWSVPCConfiguration(tfMap map[string]interface{}) *types.AwsVpcConfiguration { +func expandVPCConfiguration(tfMap map[string]interface{}) *types.AwsVpcConfiguration { if tfMap == nil { return nil } @@ -1267,7 +1267,7 @@ func expandAWSVPCConfiguration(tfMap map[string]interface{}) *types.AwsVpcConfig return apiObject } -func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride { +func expandECSTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride { if tfMap == nil { return nil } @@ -1275,7 +1275,7 @@ func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride apiObject := &types.EcsTaskOverride{} if v, ok := tfMap["container_override"].([]interface{}); ok && len(v) > 0 { - apiObject.ContainerOverrides = expandEcsContainerOverrides(v) + apiObject.ContainerOverrides = expandECSContainerOverrides(v) } if v, ok := tfMap["cpu"].(string); ok && v != "" { @@ -1283,7 +1283,7 @@ func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride } if v, ok := tfMap["ephemeral_storage"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.EphemeralStorage = expandEcsEphemeralStorage(v[0].(map[string]interface{})) + apiObject.EphemeralStorage = expandECSEphemeralStorage(v[0].(map[string]interface{})) } if v, ok := tfMap["execution_role_arn"].(string); ok && v != "" { @@ -1291,7 +1291,7 @@ func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride } if v, ok := tfMap["inference_accelerator_override"].([]interface{}); ok && len(v) > 0 { - apiObject.InferenceAcceleratorOverrides = expandEcsInferenceAcceleratorOverrides(v) + apiObject.InferenceAcceleratorOverrides = expandECSInferenceAcceleratorOverrides(v) } if v, ok := tfMap["memory"].(string); ok && v != "" { @@ -1305,7 +1305,7 @@ func expandEcsTaskOverride(tfMap map[string]interface{}) *types.EcsTaskOverride return apiObject } -func expandEcsContainerOverride(tfMap map[string]interface{}) *types.EcsContainerOverride { +func expandECSContainerOverride(tfMap map[string]interface{}) *types.EcsContainerOverride { if tfMap == nil { return nil } @@ -1321,11 +1321,11 @@ func expandEcsContainerOverride(tfMap map[string]interface{}) *types.EcsContaine } if v, ok := tfMap["environment"].([]interface{}); ok && len(v) > 0 { - apiObject.Environment = expandEcsEnvironmentVariables(v) + apiObject.Environment = expandECSEnvironmentVariables(v) } if v, ok := 
tfMap["environment_file"].([]interface{}); ok && len(v) > 0 { - apiObject.EnvironmentFiles = expandEcsEnvironmentFiles(v) + apiObject.EnvironmentFiles = expandECSEnvironmentFiles(v) } if v, ok := tfMap["memory"].(int); ok { @@ -1341,13 +1341,13 @@ func expandEcsContainerOverride(tfMap map[string]interface{}) *types.EcsContaine } if v, ok := tfMap["resource_requirement"].([]interface{}); ok && len(v) > 0 { - apiObject.ResourceRequirements = expandEcsResourceRequirements(v) + apiObject.ResourceRequirements = expandECSResourceRequirements(v) } return apiObject } -func expandEcsContainerOverrides(tfList []interface{}) []types.EcsContainerOverride { +func expandECSContainerOverrides(tfList []interface{}) []types.EcsContainerOverride { if len(tfList) == 0 { return nil } @@ -1361,7 +1361,7 @@ func expandEcsContainerOverrides(tfList []interface{}) []types.EcsContainerOverr continue } - apiObject := expandEcsContainerOverride(tfMap) + apiObject := expandECSContainerOverride(tfMap) if apiObject == nil { continue @@ -1373,7 +1373,7 @@ func expandEcsContainerOverrides(tfList []interface{}) []types.EcsContainerOverr return apiObjects } -func expandEcsEnvironmentVariable(tfMap map[string]interface{}) *types.EcsEnvironmentVariable { +func expandECSEnvironmentVariable(tfMap map[string]interface{}) *types.EcsEnvironmentVariable { if tfMap == nil { return nil } @@ -1391,7 +1391,7 @@ func expandEcsEnvironmentVariable(tfMap map[string]interface{}) *types.EcsEnviro return apiObject } -func expandEcsEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentVariable { +func expandECSEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentVariable { if len(tfList) == 0 { return nil } @@ -1405,7 +1405,7 @@ func expandEcsEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentV continue } - apiObject := expandEcsEnvironmentVariable(tfMap) + apiObject := expandECSEnvironmentVariable(tfMap) if apiObject == nil { continue @@ -1417,7 +1417,7 @@ func expandEcsEnvironmentVariables(tfList []interface{}) []types.EcsEnvironmentV return apiObjects } -func expandEcsEnvironmentFile(tfMap map[string]interface{}) *types.EcsEnvironmentFile { +func expandECSEnvironmentFile(tfMap map[string]interface{}) *types.EcsEnvironmentFile { if tfMap == nil { return nil } @@ -1435,7 +1435,7 @@ func expandEcsEnvironmentFile(tfMap map[string]interface{}) *types.EcsEnvironmen return apiObject } -func expandEcsEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile { +func expandECSEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile { if len(tfList) == 0 { return nil } @@ -1449,7 +1449,7 @@ func expandEcsEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile continue } - apiObject := expandEcsEnvironmentFile(tfMap) + apiObject := expandECSEnvironmentFile(tfMap) if apiObject == nil { continue @@ -1461,7 +1461,7 @@ func expandEcsEnvironmentFiles(tfList []interface{}) []types.EcsEnvironmentFile return apiObjects } -func expandEcsResourceRequirement(tfMap map[string]interface{}) *types.EcsResourceRequirement { +func expandECSResourceRequirement(tfMap map[string]interface{}) *types.EcsResourceRequirement { if tfMap == nil { return nil } @@ -1479,7 +1479,7 @@ func expandEcsResourceRequirement(tfMap map[string]interface{}) *types.EcsResour return apiObject } -func expandEcsResourceRequirements(tfList []interface{}) []types.EcsResourceRequirement { +func expandECSResourceRequirements(tfList []interface{}) []types.EcsResourceRequirement { if len(tfList) == 0 { return nil } @@ -1493,7 +1493,7 @@ 
func expandEcsResourceRequirements(tfList []interface{}) []types.EcsResourceRequ continue } - apiObject := expandEcsResourceRequirement(tfMap) + apiObject := expandECSResourceRequirement(tfMap) if apiObject == nil { continue @@ -1505,7 +1505,7 @@ func expandEcsResourceRequirements(tfList []interface{}) []types.EcsResourceRequ return apiObjects } -func expandEcsEphemeralStorage(tfMap map[string]interface{}) *types.EcsEphemeralStorage { +func expandECSEphemeralStorage(tfMap map[string]interface{}) *types.EcsEphemeralStorage { if tfMap == nil { return nil } @@ -1519,7 +1519,7 @@ func expandEcsEphemeralStorage(tfMap map[string]interface{}) *types.EcsEphemeral return apiObject } -func expandEcsInferenceAcceleratorOverride(tfMap map[string]interface{}) *types.EcsInferenceAcceleratorOverride { +func expandECSInferenceAcceleratorOverride(tfMap map[string]interface{}) *types.EcsInferenceAcceleratorOverride { if tfMap == nil { return nil } @@ -1537,7 +1537,7 @@ func expandEcsInferenceAcceleratorOverride(tfMap map[string]interface{}) *types. return apiObject } -func expandEcsInferenceAcceleratorOverrides(tfList []interface{}) []types.EcsInferenceAcceleratorOverride { +func expandECSInferenceAcceleratorOverrides(tfList []interface{}) []types.EcsInferenceAcceleratorOverride { if len(tfList) == 0 { return nil } @@ -1551,7 +1551,7 @@ func expandEcsInferenceAcceleratorOverrides(tfList []interface{}) []types.EcsInf continue } - apiObject := expandEcsInferenceAcceleratorOverride(tfMap) + apiObject := expandECSInferenceAcceleratorOverride(tfMap) if apiObject == nil { continue @@ -1681,7 +1681,7 @@ func expandPipeTargetEventBridgeEventBusParameters(tfMap map[string]interface{}) return apiObject } -func expandPipeTargetHttpParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { +func expandPipeTargetHTTPParameters(tfMap map[string]interface{}) *types.PipeTargetHttpParameters { if tfMap == nil { return nil } @@ -1823,7 +1823,7 @@ func expandSageMakerPipelineParameters(tfList []interface{}) []types.SageMakerPi return apiObjects } -func expandPipeTargetSqsQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { +func expandPipeTargetSQSQueueParameters(tfMap map[string]interface{}) *types.PipeTargetSqsQueueParameters { if tfMap == nil { return nil } @@ -1871,7 +1871,7 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri } if v := apiObject.EcsTaskParameters; v != nil { - tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetEcsTaskParameters(v)} + tfMap["cloudwatch_logs_parameters"] = []interface{}{flattenPipeTargetECSTaskParameters(v)} } if v := apiObject.EventBridgeEventBusParameters; v != nil { @@ -1879,7 +1879,7 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri } if v := apiObject.HttpParameters; v != nil { - tfMap["http_parameters"] = []interface{}{flattenPipeTargetHttpParameters(v)} + tfMap["http_parameters"] = []interface{}{flattenPipeTargetHTTPParameters(v)} } if v := apiObject.InputTemplate; v != nil { @@ -1903,7 +1903,7 @@ func flattenPipeTargetParameters(apiObject *types.PipeTargetParameters) map[stri } if v := apiObject.SqsQueueParameters; v != nil { - tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSqsQueueParameters(v)} + tfMap["sqs_queue_parameters"] = []interface{}{flattenPipeTargetSQSQueueParameters(v)} } if v := apiObject.StepFunctionStateMachineParameters; v != nil { @@ -2107,7 +2107,7 @@ func flattenPipeTargetCloudWatchLogsParameters(apiObject 
*types.PipeTargetCloudW return tfMap } -func flattenPipeTargetEcsTaskParameters(apiObject *types.PipeTargetEcsTaskParameters) map[string]interface{} { +func flattenPipeTargetECSTaskParameters(apiObject *types.PipeTargetEcsTaskParameters) map[string]interface{} { if apiObject == nil { return nil } @@ -2134,7 +2134,7 @@ func flattenPipeTargetEcsTaskParameters(apiObject *types.PipeTargetEcsTaskParame } if v := apiObject.Overrides; v != nil { - tfMap["overrides"] = []interface{}{flattenEcsTaskOverride(v)} + tfMap["overrides"] = []interface{}{flattenECSTaskOverride(v)} } if v := apiObject.PlacementConstraints; v != nil { @@ -2205,7 +2205,7 @@ func flattenCapacityProviderStrategyItems(apiObjects []types.CapacityProviderStr return tfList } -func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interface{} { +func flattenECSTaskOverride(apiObject *types.EcsTaskOverride) map[string]interface{} { if apiObject == nil { return nil } @@ -2213,7 +2213,7 @@ func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interfa tfMap := map[string]interface{}{} if v := apiObject.ContainerOverrides; v != nil { - tfMap["container_override"] = flattenEcsContainerOverrides(v) + tfMap["container_override"] = flattenECSContainerOverrides(v) } if v := apiObject.Cpu; v != nil { @@ -2221,7 +2221,7 @@ func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interfa } if v := apiObject.EphemeralStorage; v != nil { - tfMap["ephemeral_storage"] = []interface{}{flattenEcsEphemeralStorage(v)} + tfMap["ephemeral_storage"] = []interface{}{flattenECSEphemeralStorage(v)} } if v := apiObject.ExecutionRoleArn; v != nil { @@ -2229,7 +2229,7 @@ func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interfa } if v := apiObject.InferenceAcceleratorOverrides; v != nil { - tfMap["inference_accelerator_override"] = flattenEcsInferenceAcceleratorOverrides(v) + tfMap["inference_accelerator_override"] = flattenECSInferenceAcceleratorOverrides(v) } if v := apiObject.Memory; v != nil { @@ -2243,7 +2243,7 @@ func flattenEcsTaskOverride(apiObject *types.EcsTaskOverride) map[string]interfa return tfMap } -func flattenEcsContainerOverride(apiObject types.EcsContainerOverride) map[string]interface{} { +func flattenECSContainerOverride(apiObject types.EcsContainerOverride) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Command; v != nil { @@ -2255,11 +2255,11 @@ func flattenEcsContainerOverride(apiObject types.EcsContainerOverride) map[strin } if v := apiObject.Environment; v != nil { - tfMap["environment"] = flattenEcsEnvironmentVariables(v) + tfMap["environment"] = flattenECSEnvironmentVariables(v) } if v := apiObject.EnvironmentFiles; v != nil { - tfMap["environment_file"] = flattenEcsEnvironmentFiles(v) + tfMap["environment_file"] = flattenECSEnvironmentFiles(v) } if v := apiObject.Memory; v != nil { @@ -2275,13 +2275,13 @@ func flattenEcsContainerOverride(apiObject types.EcsContainerOverride) map[strin } if v := apiObject.ResourceRequirements; v != nil { - tfMap["resource_requirement"] = flattenEcsResourceRequirements(v) + tfMap["resource_requirement"] = flattenECSResourceRequirements(v) } return tfMap } -func flattenEcsContainerOverrides(apiObjects []types.EcsContainerOverride) []interface{} { +func flattenECSContainerOverrides(apiObjects []types.EcsContainerOverride) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2289,13 +2289,13 @@ func flattenEcsContainerOverrides(apiObjects []types.EcsContainerOverride) []int var tfList 
[]interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenEcsContainerOverride(apiObject)) + tfList = append(tfList, flattenECSContainerOverride(apiObject)) } return tfList } -func flattenEcsResourceRequirement(apiObject types.EcsResourceRequirement) map[string]interface{} { +func flattenECSResourceRequirement(apiObject types.EcsResourceRequirement) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Type; v != "" { @@ -2309,7 +2309,7 @@ func flattenEcsResourceRequirement(apiObject types.EcsResourceRequirement) map[s return tfMap } -func flattenEcsResourceRequirements(apiObjects []types.EcsResourceRequirement) []interface{} { +func flattenECSResourceRequirements(apiObjects []types.EcsResourceRequirement) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2317,13 +2317,13 @@ func flattenEcsResourceRequirements(apiObjects []types.EcsResourceRequirement) [ var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenEcsResourceRequirement(apiObject)) + tfList = append(tfList, flattenECSResourceRequirement(apiObject)) } return tfList } -func flattenEcsEnvironmentFile(apiObject types.EcsEnvironmentFile) map[string]interface{} { +func flattenECSEnvironmentFile(apiObject types.EcsEnvironmentFile) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Type; v != "" { @@ -2337,7 +2337,7 @@ func flattenEcsEnvironmentFile(apiObject types.EcsEnvironmentFile) map[string]in return tfMap } -func flattenEcsEnvironmentVariable(apiObject types.EcsEnvironmentVariable) map[string]interface{} { +func flattenECSEnvironmentVariable(apiObject types.EcsEnvironmentVariable) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Name; v != nil { @@ -2351,7 +2351,7 @@ func flattenEcsEnvironmentVariable(apiObject types.EcsEnvironmentVariable) map[s return tfMap } -func flattenEcsEnvironmentVariables(apiObjects []types.EcsEnvironmentVariable) []interface{} { +func flattenECSEnvironmentVariables(apiObjects []types.EcsEnvironmentVariable) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2359,13 +2359,13 @@ func flattenEcsEnvironmentVariables(apiObjects []types.EcsEnvironmentVariable) [ var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenEcsEnvironmentVariable(apiObject)) + tfList = append(tfList, flattenECSEnvironmentVariable(apiObject)) } return tfList } -func flattenEcsEnvironmentFiles(apiObjects []types.EcsEnvironmentFile) []interface{} { +func flattenECSEnvironmentFiles(apiObjects []types.EcsEnvironmentFile) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2373,13 +2373,13 @@ func flattenEcsEnvironmentFiles(apiObjects []types.EcsEnvironmentFile) []interfa var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenEcsEnvironmentFile(apiObject)) + tfList = append(tfList, flattenECSEnvironmentFile(apiObject)) } return tfList } -func flattenEcsEphemeralStorage(apiObject *types.EcsEphemeralStorage) map[string]interface{} { +func flattenECSEphemeralStorage(apiObject *types.EcsEphemeralStorage) map[string]interface{} { if apiObject == nil { return nil } @@ -2391,7 +2391,7 @@ func flattenEcsEphemeralStorage(apiObject *types.EcsEphemeralStorage) map[string return tfMap } -func flattenEcsInferenceAcceleratorOverride(apiObject types.EcsInferenceAcceleratorOverride) map[string]interface{} { +func flattenECSInferenceAcceleratorOverride(apiObject types.EcsInferenceAcceleratorOverride) 
map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.DeviceName; v != nil { @@ -2405,7 +2405,7 @@ func flattenEcsInferenceAcceleratorOverride(apiObject types.EcsInferenceAccelera return tfMap } -func flattenEcsInferenceAcceleratorOverrides(apiObjects []types.EcsInferenceAcceleratorOverride) []interface{} { +func flattenECSInferenceAcceleratorOverrides(apiObjects []types.EcsInferenceAcceleratorOverride) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2413,7 +2413,7 @@ func flattenEcsInferenceAcceleratorOverrides(apiObjects []types.EcsInferenceAcce var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenEcsInferenceAcceleratorOverride(apiObject)) + tfList = append(tfList, flattenECSInferenceAcceleratorOverride(apiObject)) } return tfList @@ -2427,13 +2427,13 @@ func flattenNetworkConfiguration(apiObject *types.NetworkConfiguration) map[stri tfMap := map[string]interface{}{} if v := apiObject.AwsvpcConfiguration; v != nil { - tfMap["aws_vpc_configuration"] = []interface{}{flattenAWSVPCConfiguration(v)} + tfMap["aws_vpc_configuration"] = []interface{}{flattenVPCConfiguration(v)} } return tfMap } -func flattenAWSVPCConfiguration(apiObject *types.AwsVpcConfiguration) map[string]interface{} { +func flattenVPCConfiguration(apiObject *types.AwsVpcConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -2541,7 +2541,7 @@ func flattenPipeTargetEventBridgeEventBusParameters(apiObject *types.PipeTargetE return tfMap } -func flattenPipeTargetHttpParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { +func flattenPipeTargetHTTPParameters(apiObject *types.PipeTargetHttpParameters) map[string]interface{} { if apiObject == nil { return nil } @@ -2665,7 +2665,7 @@ func flattenSageMakerPipelineParameters(apiObjects []types.SageMakerPipelinePara return tfList } -func flattenPipeTargetSqsQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { +func flattenPipeTargetSQSQueueParameters(apiObject *types.PipeTargetSqsQueueParameters) map[string]interface{} { if apiObject == nil { return nil } From 517e8a2cf83cb67bd4e490496d6ca63579534f42 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:31:22 -0400 Subject: [PATCH 62/65] Add NOTE to CHANGELOG entries. --- .changelog/31607.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/31607.txt b/.changelog/31607.txt index ae99e2b8e3f..7344c3e368c 100644 --- a/.changelog/31607.txt +++ b/.changelog/31607.txt @@ -3,9 +3,9 @@ resource/aws_pipes_pipe: Add `enrichment_parameters` argument ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_parameters`, `kinesis_stream_parameters`, `managed_streaming_kafka_parameters`, `rabbitmq_broker_parameters`, `self_managed_kafka_parameters` and `sqs_queue_parameters` attributes to the `source_parameters` configuration block +resource/aws_pipes_pipe: Add `activemq_broker_parameters`, `dynamodb_stream_parameters`, `kinesis_stream_parameters`, `managed_streaming_kafka_parameters`, `rabbitmq_broker_parameters`, `self_managed_kafka_parameters` and `sqs_queue_parameters` attributes to the `source_parameters` configuration block. 
NOTE: Because we cannot easily test all this functionality, it is best effort and we ask for community help in testing ``` ```release-note:enhancement -resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task_parameters`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block +resource/aws_pipes_pipe: Add `batch_job_parameters`, `cloudwatch_logs_parameters`, `ecs_task_parameters`, `eventbridge_event_bus_parameters`, `http_parameters`, `kinesis_stream_parameters`, `lambda_function_parameters`, `redshift_data_parameters`, `sagemaker_pipeline_parameters`, `sqs_queue_parameters` and `step_function_state_machine_parameters` attributes to the `target_parameters` configuration block. NOTE: Because we cannot easily test all this functionality, it is best effort and we ask for community help in testing ``` \ No newline at end of file From bbb25ff0a1e87f116747e34bbe24c09ef543bb34 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:34:22 -0400 Subject: [PATCH 63/65] Fix golangci-lint 'paralleltest'. --- internal/types/zero_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/types/zero_test.go b/internal/types/zero_test.go index 037bdc5a6a3..d8e38702a50 100644 --- a/internal/types/zero_test.go +++ b/internal/types/zero_test.go @@ -40,6 +40,8 @@ func TestIsZero(t *testing.T) { testCase := testCase t.Run(testCase.Name, func(t *testing.T) { + t.Parallel() + got := IsZero(testCase.Ptr) if got != testCase.Expected { From a2c7ca6dd4f5326d72d2bea34fddf72782a5a1c2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:42:39 -0400 Subject: [PATCH 64/65] Fix golangci-lint 'unused'. --- internal/service/pipes/flex.go | 40 ---------------------------------- 1 file changed, 40 deletions(-) delete mode 100644 internal/service/pipes/flex.go diff --git a/internal/service/pipes/flex.go b/internal/service/pipes/flex.go deleted file mode 100644 index 63287740210..00000000000 --- a/internal/service/pipes/flex.go +++ /dev/null @@ -1,40 +0,0 @@ -package pipes - -func expandString(key string, param map[string]interface{}) *string { - if val, ok := param[key]; ok { - if value, ok := val.(string); ok { - if value != "" { - return &value - } - } - } - return nil -} - -func expandInt32(key string, param map[string]interface{}) *int32 { - if val, ok := param[key]; ok { - if value, ok := val.(int); ok { - i := int32(value) - return &i - } - } - return nil -} - -func expandBool(key string, param map[string]interface{}) bool { - if val, ok := param[key]; ok { - if value, ok := val.(bool); ok { - return value - } - } - return false -} - -func expandStringValue(key string, param map[string]interface{}) string { - if val, ok := param[key]; ok { - if value, ok := val.(string); ok { - return value - } - } - return "" -} From 969b6a4d15bcc9e435743f997e80b220ad82be8c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 26 Jun 2023 17:45:50 -0400 Subject: [PATCH 65/65] Fix golangci-lint 'staticcheck'. 
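
The fix is mechanical: each of the four affected tests now calls the
skip helper before the test context or any fixtures are created. As a
sketch (assuming acctest.Skip simply wraps t.Skip, which stops the test
immediately, and using a hypothetical test name):

func TestAccPipesPipe_example(t *testing.T) {
	// Skip first, so a skipped test performs no setup work at all.
	acctest.Skip(t, "reason the test cannot currently run")

	// Nothing below executes while the skip is in place.
	ctx := acctest.Context(t)
	_ = ctx
}
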
--- internal/service/pipes/pipe_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/pipes/pipe_test.go b/internal/service/pipes/pipe_test.go index df0b1e022a9..c6a093fe1f0 100644 --- a/internal/service/pipes/pipe_test.go +++ b/internal/service/pipes/pipe_test.go @@ -905,13 +905,13 @@ func TestAccPipesPipe_rabbitMQSourceEventBusTarget(t *testing.T) { } func TestAccPipesPipe_mskSourceHTTPTarget(t *testing.T) { + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" - acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -981,13 +981,13 @@ func TestAccPipesPipe_mskSourceHTTPTarget(t *testing.T) { } func TestAccPipesPipe_selfManagedKafkaSourceLambdaFunctionTarget(t *testing.T) { + acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") + ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" - acctest.Skip(t, "DependencyViolation errors deleting subnets and security group") - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -1128,13 +1128,13 @@ func TestAccPipesPipe_sqsSourceRedshiftTarget(t *testing.T) { } func TestAccPipesPipe_SourceSageMakerTarget(t *testing.T) { + acctest.Skip(t, "aws_sagemaker_pipeline resource not yet implemented") + ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" - acctest.Skip(t, "aws_sagemaker_pipeline resource not yet implemented") - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) @@ -1279,13 +1279,13 @@ func TestAccPipesPipe_sqsSourceBatchJobTarget(t *testing.T) { } func TestAccPipesPipe_sqsSourceECSTaskTarget(t *testing.T) { + acctest.Skip(t, "ValidationException: [numeric instance is lower than the required minimum (minimum: 1, found: 0)]") + ctx := acctest.Context(t) var pipe pipes.DescribePipeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_pipes_pipe.test" - acctest.Skip(t, "ValidationException: [numeric instance is lower than the required minimum (minimum: 1, found: 0)]") - resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t)