diff --git a/aws/diff_suppress_funcs.go b/aws/diff_suppress_funcs.go index 0e340e4351e..b1bb9f28e71 100644 --- a/aws/diff_suppress_funcs.go +++ b/aws/diff_suppress_funcs.go @@ -132,3 +132,10 @@ func suppressRoute53ZoneNameWithTrailingDot(k, old, new string, d *schema.Resour } return strings.TrimSuffix(old, ".") == strings.TrimSuffix(new, ".") } + +/* +// suppressStringsEqualFold suppresses any difference between two strings that are equal under Unicode case-folding. +func suppressStringsEqualFold(k, old, new string, d *schema.ResourceData) bool { + return strings.EqualFold(old, new) +} +*/ diff --git a/aws/resource_aws_appmesh_mesh.go b/aws/resource_aws_appmesh_mesh.go index 7ab40e45c40..2fe5c7e497e 100644 --- a/aws/resource_aws_appmesh_mesh.go +++ b/aws/resource_aws_appmesh_mesh.go @@ -189,3 +189,43 @@ func resourceAwsAppmeshMeshDelete(d *schema.ResourceData, meta interface{}) erro return nil } + +func expandAppmeshMeshSpec(vSpec []interface{}) *appmesh.MeshSpec { + spec := &appmesh.MeshSpec{} + + if len(vSpec) == 0 || vSpec[0] == nil { + // Empty Spec is allowed. 
+ return spec + } + mSpec := vSpec[0].(map[string]interface{}) + + if vEgressFilter, ok := mSpec["egress_filter"].([]interface{}); ok && len(vEgressFilter) > 0 && vEgressFilter[0] != nil { + mEgressFilter := vEgressFilter[0].(map[string]interface{}) + + if vType, ok := mEgressFilter["type"].(string); ok && vType != "" { + spec.EgressFilter = &appmesh.EgressFilter{ + Type: aws.String(vType), + } + } + } + + return spec +} + +func flattenAppmeshMeshSpec(spec *appmesh.MeshSpec) []interface{} { + if spec == nil { + return []interface{}{} + } + + mSpec := map[string]interface{}{} + + if spec.EgressFilter != nil { + mSpec["egress_filter"] = []interface{}{ + map[string]interface{}{ + "type": aws.StringValue(spec.EgressFilter.Type), + }, + } + } + + return []interface{}{mSpec} +} diff --git a/aws/resource_aws_appmesh_route.go b/aws/resource_aws_appmesh_route.go index 128f1c10040..bb16df365c8 100644 --- a/aws/resource_aws_appmesh_route.go +++ b/aws/resource_aws_appmesh_route.go @@ -26,11 +26,19 @@ func resourceAwsAppmeshRoute() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 255), + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "last_updated_date": { + Type: schema.TypeString, + Computed: true, }, "mesh_name": { @@ -40,7 +48,7 @@ func resourceAwsAppmeshRoute() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 255), }, - "virtual_router_name": { + "name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -89,7 +97,7 @@ func resourceAwsAppmeshRoute() *schema.Resource { }, }, }, - Set: appmeshRouteWeightedTargetHash, + Set: appmeshWeightedTargetHash, }, }, }, @@ -102,11 +110,113 @@ func resourceAwsAppmeshRoute() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "header": { + Type: 
schema.TypeSet, + Optional: true, + MinItems: 0, + MaxItems: 10, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "invert": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "match": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exact": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + + "prefix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + + "range": { + Type: schema.TypeList, + Optional: true, + MinItems: 0, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end": { + Type: schema.TypeInt, + Required: true, + }, + + "start": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "regex": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + + "suffix": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 255), + }, + }, + }, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 50), + }, + }, + }, + Set: appmeshHttpRouteHeaderHash, + }, + + "method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + appmesh.HttpMethodConnect, + appmesh.HttpMethodDelete, + appmesh.HttpMethodGet, + appmesh.HttpMethodHead, + appmesh.HttpMethodOptions, + appmesh.HttpMethodPatch, + appmesh.HttpMethodPost, + appmesh.HttpMethodPut, + appmesh.HttpMethodTrace, + }, false), + }, + "prefix": { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringMatch(regexp.MustCompile(`^/`), "must start with /"), }, + + "scheme": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + appmesh.HttpSchemeHttp, + appmesh.HttpSchemeHttps, + }, false), + }, }, 
}, }, @@ -114,6 +224,12 @@ func resourceAwsAppmeshRoute() *schema.Resource { }, }, + "priority": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 1000), + }, + "tcp_route": { Type: schema.TypeList, Optional: true, @@ -149,7 +265,7 @@ func resourceAwsAppmeshRoute() *schema.Resource { }, }, }, - Set: appmeshRouteWeightedTargetHash, + Set: appmeshWeightedTargetHash, }, }, }, @@ -161,22 +277,14 @@ func resourceAwsAppmeshRoute() *schema.Resource { }, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, + "tags": tagsSchema(), - "last_updated_date": { - Type: schema.TypeString, - Computed: true, + "virtual_router_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 255), }, - - "tags": tagsSchema(), }, } } @@ -187,12 +295,12 @@ func resourceAwsAppmeshRouteCreate(d *schema.ResourceData, meta interface{}) err req := &appmesh.CreateRouteInput{ MeshName: aws.String(d.Get("mesh_name").(string)), RouteName: aws.String(d.Get("name").(string)), - VirtualRouterName: aws.String(d.Get("virtual_router_name").(string)), Spec: expandAppmeshRouteSpec(d.Get("spec").([]interface{})), Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), + VirtualRouterName: aws.String(d.Get("virtual_router_name").(string)), } - log.Printf("[DEBUG] Creating App Mesh route: %#v", req) + log.Printf("[DEBUG] Creating App Mesh route: %s", req) resp, err := conn.CreateRoute(req) if err != nil { return fmt.Errorf("error creating App Mesh route: %s", err) @@ -217,7 +325,7 @@ func resourceAwsAppmeshRouteRead(d *schema.ResourceData, meta interface{}) error return nil } if err != nil { - return fmt.Errorf("error reading App Mesh route: %s", err) + return fmt.Errorf("error reading App Mesh route (%s): %s", d.Id(), err) } if aws.StringValue(resp.Route.Status.Status) == appmesh.RouteStatusCodeDeleted { log.Printf("[WARN] App 
Mesh route (%s) not found, removing from state", d.Id()) @@ -225,16 +333,16 @@ func resourceAwsAppmeshRouteRead(d *schema.ResourceData, meta interface{}) error return nil } - d.Set("name", resp.Route.RouteName) - d.Set("mesh_name", resp.Route.MeshName) - d.Set("virtual_router_name", resp.Route.VirtualRouterName) d.Set("arn", resp.Route.Metadata.Arn) d.Set("created_date", resp.Route.Metadata.CreatedAt.Format(time.RFC3339)) d.Set("last_updated_date", resp.Route.Metadata.LastUpdatedAt.Format(time.RFC3339)) + d.Set("mesh_name", resp.Route.MeshName) + d.Set("name", resp.Route.RouteName) err = d.Set("spec", flattenAppmeshRouteSpec(resp.Route.Spec)) if err != nil { return fmt.Errorf("error setting spec: %s", err) } + d.Set("virtual_router_name", resp.Route.VirtualRouterName) err = saveTagsAppmesh(conn, d, aws.StringValue(resp.Route.Metadata.Arn)) if isAWSErr(err, appmesh.ErrCodeNotFoundException, "") { @@ -257,14 +365,14 @@ func resourceAwsAppmeshRouteUpdate(d *schema.ResourceData, meta interface{}) err req := &appmesh.UpdateRouteInput{ MeshName: aws.String(d.Get("mesh_name").(string)), RouteName: aws.String(d.Get("name").(string)), - VirtualRouterName: aws.String(d.Get("virtual_router_name").(string)), Spec: expandAppmeshRouteSpec(v.([]interface{})), + VirtualRouterName: aws.String(d.Get("virtual_router_name").(string)), } - log.Printf("[DEBUG] Updating App Mesh route: %#v", req) + log.Printf("[DEBUG] Updating App Mesh route: %s", req) _, err := conn.UpdateRoute(req) if err != nil { - return fmt.Errorf("error updating App Mesh route: %s", err) + return fmt.Errorf("error updating App Mesh route (%s): %s", d.Id(), err) } } @@ -294,7 +402,7 @@ func resourceAwsAppmeshRouteDelete(d *schema.ResourceData, meta interface{}) err return nil } if err != nil { - return fmt.Errorf("error deleting App Mesh route: %s", err) + return fmt.Errorf("error deleting App Mesh route (%s): %s", d.Id(), err) } return nil @@ -323,14 +431,311 @@ func resourceAwsAppmeshRouteImport(d 
*schema.ResourceData, meta interface{}) ([] } d.SetId(aws.StringValue(resp.Route.Metadata.Uid)) - d.Set("name", resp.Route.RouteName) d.Set("mesh_name", resp.Route.MeshName) + d.Set("name", resp.Route.RouteName) d.Set("virtual_router_name", resp.Route.VirtualRouterName) return []*schema.ResourceData{d}, nil } -func appmeshRouteWeightedTargetHash(v interface{}) int { +func expandAppmeshRouteSpec(vSpec []interface{}) *appmesh.RouteSpec { + spec := &appmesh.RouteSpec{} + + if len(vSpec) == 0 || vSpec[0] == nil { + // Empty Spec is allowed. + return spec + } + mSpec := vSpec[0].(map[string]interface{}) + + if vPriority, ok := mSpec["priority"].(int); ok && vPriority > 0 { + spec.Priority = aws.Int64(int64(vPriority)) + } + + if vHttpRoute, ok := mSpec["http_route"].([]interface{}); ok && len(vHttpRoute) > 0 && vHttpRoute[0] != nil { + mHttpRoute := vHttpRoute[0].(map[string]interface{}) + + spec.HttpRoute = &appmesh.HttpRoute{} + + if vHttpRouteAction, ok := mHttpRoute["action"].([]interface{}); ok && len(vHttpRouteAction) > 0 && vHttpRouteAction[0] != nil { + mHttpRouteAction := vHttpRouteAction[0].(map[string]interface{}) + + if vWeightedTargets, ok := mHttpRouteAction["weighted_target"].(*schema.Set); ok && vWeightedTargets.Len() > 0 { + weightedTargets := []*appmesh.WeightedTarget{} + + for _, vWeightedTarget := range vWeightedTargets.List() { + weightedTarget := &appmesh.WeightedTarget{} + + mWeightedTarget := vWeightedTarget.(map[string]interface{}) + + if vVirtualNode, ok := mWeightedTarget["virtual_node"].(string); ok && vVirtualNode != "" { + weightedTarget.VirtualNode = aws.String(vVirtualNode) + } + if vWeight, ok := mWeightedTarget["weight"].(int); ok && vWeight > 0 { + weightedTarget.Weight = aws.Int64(int64(vWeight)) + } + + weightedTargets = append(weightedTargets, weightedTarget) + } + + spec.HttpRoute.Action = &appmesh.HttpRouteAction{ + WeightedTargets: weightedTargets, + } + } + } + + if vHttpRouteMatch, ok := mHttpRoute["match"].([]interface{}); ok 
&& len(vHttpRouteMatch) > 0 && vHttpRouteMatch[0] != nil { + httpRouteMatch := &appmesh.HttpRouteMatch{} + + mHttpRouteMatch := vHttpRouteMatch[0].(map[string]interface{}) + + if vMethod, ok := mHttpRouteMatch["method"].(string); ok && vMethod != "" { + httpRouteMatch.Method = aws.String(vMethod) + } + if vPrefix, ok := mHttpRouteMatch["prefix"].(string); ok && vPrefix != "" { + httpRouteMatch.Prefix = aws.String(vPrefix) + } + if vScheme, ok := mHttpRouteMatch["scheme"].(string); ok && vScheme != "" { + httpRouteMatch.Scheme = aws.String(vScheme) + } + + if vHttpRouteHeaders, ok := mHttpRouteMatch["header"].(*schema.Set); ok && vHttpRouteHeaders.Len() > 0 { + httpRouteHeaders := []*appmesh.HttpRouteHeader{} + + for _, vHttpRouteHeader := range vHttpRouteHeaders.List() { + httpRouteHeader := &appmesh.HttpRouteHeader{} + + mHttpRouteHeader := vHttpRouteHeader.(map[string]interface{}) + + if vInvert, ok := mHttpRouteHeader["invert"].(bool); ok { + httpRouteHeader.Invert = aws.Bool(vInvert) + } + if vName, ok := mHttpRouteHeader["name"].(string); ok && vName != "" { + httpRouteHeader.Name = aws.String(vName) + } + + if vMatch, ok := mHttpRouteHeader["match"].([]interface{}); ok && len(vMatch) > 0 && vMatch[0] != nil { + httpRouteHeader.Match = &appmesh.HeaderMatchMethod{} + + mMatch := vMatch[0].(map[string]interface{}) + + if vExact, ok := mMatch["exact"].(string); ok && vExact != "" { + httpRouteHeader.Match.Exact = aws.String(vExact) + } + if vPrefix, ok := mMatch["prefix"].(string); ok && vPrefix != "" { + httpRouteHeader.Match.Prefix = aws.String(vPrefix) + } + if vRegex, ok := mMatch["regex"].(string); ok && vRegex != "" { + httpRouteHeader.Match.Regex = aws.String(vRegex) + } + if vSuffix, ok := mMatch["suffix"].(string); ok && vSuffix != "" { + httpRouteHeader.Match.Suffix = aws.String(vSuffix) + } + + if vRange, ok := mMatch["range"].([]interface{}); ok && len(vRange) > 0 && vRange[0] != nil { + httpRouteHeader.Match.Range = &appmesh.MatchRange{} + + mRange 
:= vRange[0].(map[string]interface{}) + + if vEnd, ok := mRange["end"].(int); ok && vEnd > 0 { + httpRouteHeader.Match.Range.End = aws.Int64(int64(vEnd)) + } + if vStart, ok := mRange["start"].(int); ok && vStart > 0 { + httpRouteHeader.Match.Range.Start = aws.Int64(int64(vStart)) + } + } + } + + httpRouteHeaders = append(httpRouteHeaders, httpRouteHeader) + } + + httpRouteMatch.Headers = httpRouteHeaders + } + + spec.HttpRoute.Match = httpRouteMatch + } + } + + if vTcpRoute, ok := mSpec["tcp_route"].([]interface{}); ok && len(vTcpRoute) > 0 && vTcpRoute[0] != nil { + mTcpRoute := vTcpRoute[0].(map[string]interface{}) + + spec.TcpRoute = &appmesh.TcpRoute{} + + if vTcpRouteAction, ok := mTcpRoute["action"].([]interface{}); ok && len(vTcpRouteAction) > 0 && vTcpRouteAction[0] != nil { + mTcpRouteAction := vTcpRouteAction[0].(map[string]interface{}) + + if vWeightedTargets, ok := mTcpRouteAction["weighted_target"].(*schema.Set); ok && vWeightedTargets.Len() > 0 { + weightedTargets := []*appmesh.WeightedTarget{} + + for _, vWeightedTarget := range vWeightedTargets.List() { + weightedTarget := &appmesh.WeightedTarget{} + + mWeightedTarget := vWeightedTarget.(map[string]interface{}) + + if vVirtualNode, ok := mWeightedTarget["virtual_node"].(string); ok && vVirtualNode != "" { + weightedTarget.VirtualNode = aws.String(vVirtualNode) + } + if vWeight, ok := mWeightedTarget["weight"].(int); ok && vWeight > 0 { + weightedTarget.Weight = aws.Int64(int64(vWeight)) + } + + weightedTargets = append(weightedTargets, weightedTarget) + } + + spec.TcpRoute.Action = &appmesh.TcpRouteAction{ + WeightedTargets: weightedTargets, + } + } + } + } + + return spec +} + +func flattenAppmeshRouteSpec(spec *appmesh.RouteSpec) []interface{} { + if spec == nil { + return []interface{}{} + } + + mSpec := map[string]interface{}{ + "priority": int(aws.Int64Value(spec.Priority)), + } + + if httpRoute := spec.HttpRoute; httpRoute != nil { + mHttpRoute := map[string]interface{}{} + + if action := 
httpRoute.Action; action != nil { + if weightedTargets := action.WeightedTargets; weightedTargets != nil { + vWeightedTargets := []interface{}{} + + for _, weightedTarget := range weightedTargets { + mWeightedTarget := map[string]interface{}{ + "virtual_node": aws.StringValue(weightedTarget.VirtualNode), + "weight": int(aws.Int64Value(weightedTarget.Weight)), + } + + vWeightedTargets = append(vWeightedTargets, mWeightedTarget) + } + + mHttpRoute["action"] = []interface{}{ + map[string]interface{}{ + "weighted_target": schema.NewSet(appmeshWeightedTargetHash, vWeightedTargets), + }, + } + } + } + + if httpRouteMatch := httpRoute.Match; httpRouteMatch != nil { + vHttpRouteHeaders := []interface{}{} + + for _, httpRouteHeader := range httpRouteMatch.Headers { + mHttpRouteHeader := map[string]interface{}{ + "invert": aws.BoolValue(httpRouteHeader.Invert), + "name": aws.StringValue(httpRouteHeader.Name), + } + + if match := httpRouteHeader.Match; match != nil { + mMatch := map[string]interface{}{ + "exact": aws.StringValue(match.Exact), + "prefix": aws.StringValue(match.Prefix), + "regex": aws.StringValue(match.Regex), + "suffix": aws.StringValue(match.Suffix), + } + + if r := match.Range; r != nil { + mRange := map[string]interface{}{ + "end": int(aws.Int64Value(r.End)), + "start": int(aws.Int64Value(r.Start)), + } + + mMatch["range"] = []interface{}{mRange} + } + + mHttpRouteHeader["match"] = []interface{}{mMatch} + } + + vHttpRouteHeaders = append(vHttpRouteHeaders, mHttpRouteHeader) + } + + mHttpRoute["match"] = []interface{}{ + map[string]interface{}{ + "header": schema.NewSet(appmeshHttpRouteHeaderHash, vHttpRouteHeaders), + "method": aws.StringValue(httpRouteMatch.Method), + "prefix": aws.StringValue(httpRouteMatch.Prefix), + "scheme": aws.StringValue(httpRouteMatch.Scheme), + }, + } + } + + mSpec["http_route"] = []interface{}{mHttpRoute} + } + + if tcpRoute := spec.TcpRoute; tcpRoute != nil { + mTcpRoute := map[string]interface{}{} + + if action := 
tcpRoute.Action; action != nil { + if weightedTargets := action.WeightedTargets; weightedTargets != nil { + vWeightedTargets := []interface{}{} + + for _, weightedTarget := range weightedTargets { + mWeightedTarget := map[string]interface{}{ + "virtual_node": aws.StringValue(weightedTarget.VirtualNode), + "weight": int(aws.Int64Value(weightedTarget.Weight)), + } + + vWeightedTargets = append(vWeightedTargets, mWeightedTarget) + } + + mTcpRoute["action"] = []interface{}{ + map[string]interface{}{ + "weighted_target": schema.NewSet(appmeshWeightedTargetHash, vWeightedTargets), + }, + } + } + } + + mSpec["tcp_route"] = []interface{}{mTcpRoute} + } + + return []interface{}{mSpec} +} + +func appmeshHttpRouteHeaderHash(vHttpRouteHeader interface{}) int { + var buf bytes.Buffer + mHttpRouteHeader := vHttpRouteHeader.(map[string]interface{}) + if v, ok := mHttpRouteHeader["invert"].(bool); ok { + buf.WriteString(fmt.Sprintf("%t-", v)) + } + if vMatch, ok := mHttpRouteHeader["match"].([]interface{}); ok && len(vMatch) > 0 && vMatch[0] != nil { + mMatch := vMatch[0].(map[string]interface{}) + if v, ok := mMatch["exact"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mMatch["prefix"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if vRange, ok := mMatch["range"].([]interface{}); ok && len(vRange) > 0 && vRange[0] != nil { + mRange := vRange[0].(map[string]interface{}) + if v, ok := mRange["end"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mRange["start"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + } + if v, ok := mMatch["regex"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mMatch["suffix"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + if v, ok := mHttpRouteHeader["name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + return hashcode.String(buf.String()) +} + +func appmeshWeightedTargetHash(v interface{}) int { var buf bytes.Buffer 
m := v.(map[string]interface{}) if v, ok := m["virtual_node"].(string); ok { diff --git a/aws/resource_aws_appmesh_route_test.go b/aws/resource_aws_appmesh_route_test.go index d0019d29f6c..5e7ba1004ba 100644 --- a/aws/resource_aws_appmesh_route_test.go +++ b/aws/resource_aws_appmesh_route_test.go @@ -3,7 +3,6 @@ package aws import ( "fmt" "log" - "regexp" "testing" "github.com/aws/aws-sdk-go/aws" @@ -16,11 +15,11 @@ import ( func init() { resource.AddTestSweepers("aws_appmesh_route", &resource.Sweeper{ Name: "aws_appmesh_route", - F: testSweepAppmeshRoutes, + F: testSweepAwsAppmeshRoutes, }) } -func testSweepAppmeshRoutes(region string) error { +func testSweepAwsAppmeshRoutes(region string) error { client, err := sharedClientForRegion(region) if err != nil { return fmt.Errorf("error getting client: %s", err) @@ -102,57 +101,149 @@ func testSweepAppmeshRoutes(region string) error { func testAccAwsAppmeshRoute_httpRoute(t *testing.T) { var r appmesh.RouteData - resourceName := "aws_appmesh_route.foo" - meshName := fmt.Sprintf("tf-test-mesh-%d", acctest.RandInt()) - vrName := fmt.Sprintf("tf-test-router-%d", acctest.RandInt()) - vn1Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - vn2Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - rName := fmt.Sprintf("tf-test-route-%d", acctest.RandInt()) + resourceName := "aws_appmesh_route.test" + rName := fmt.Sprintf("tf-testacc-appmesh-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAppmeshRouteDestroy, + CheckDestroy: testAccCheckAwsAppmeshRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshRouteConfig_httpRoute(meshName, vrName, vn1Name, vn2Name, rName), + Config: testAccAwsAppmeshRouteConfig_httpRoute(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), + 
testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - resource.TestCheckResourceAttr(resourceName, "virtual_router_name", vrName), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:appmesh:[^:]+:\\d{12}:mesh/%s/virtualRouter/%s/route/%s", meshName, vrName, rName))), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { - 
Config: testAccAppmeshRouteConfig_httpRouteUpdated(meshName, vrName, vn1Name, vn2Name, rName), + Config: testAccAwsAppmeshRouteConfig_httpRouteUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - resource.TestCheckResourceAttr(resourceName, "virtual_router_name", vrName), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "2"), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/path"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: 
testAccAwsAppmeshRouteImportStateIdFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAwsAppmeshRoute_httpHeader(t *testing.T) { + var r appmesh.RouteData + resourceName := "aws_appmesh_route.test" + rName := fmt.Sprintf("tf-testacc-appmesh-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsAppmeshRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsAppmeshRouteConfig_httpHeader(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), resource.TestCheckResourceAttrSet(resourceName, "created_date"), resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:appmesh:[^:]+:\\d{12}:mesh/%s/virtualRouter/%s/route/%s", meshName, vrName, rName))), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.2366200004.invert", "false"), + resource.TestCheckResourceAttr(resourceName, 
"spec.0.http_route.0.match.0.header.2366200004.match.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.2366200004.name", "X-Testing1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", "POST"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", "http"), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), + ), + }, + { + Config: testAccAwsAppmeshRouteConfig_httpHeaderUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "2"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.2971741486.invert", "true"), + resource.TestCheckResourceAttr(resourceName, 
"spec.0.http_route.0.match.0.header.2971741486.match.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.2971741486.name", "X-Testing1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.invert", "false"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.exact", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.prefix", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.range.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.range.0.end", "7"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.range.0.start", "2"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.regex", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.match.0.suffix", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.3147536248.name", "X-Testing2"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", "PUT"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/path"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", "https"), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s/%s", 
meshName, vrName, rName), + ImportStateIdFunc: testAccAwsAppmeshRouteImportStateIdFunc(resourceName), ImportState: true, ImportStateVerify: true, }, @@ -162,53 +253,55 @@ func testAccAwsAppmeshRoute_httpRoute(t *testing.T) { func testAccAwsAppmeshRoute_tcpRoute(t *testing.T) { var r appmesh.RouteData - resourceName := "aws_appmesh_route.foo" - meshName := fmt.Sprintf("tf-test-mesh-%d", acctest.RandInt()) - vrName := fmt.Sprintf("tf-test-router-%d", acctest.RandInt()) - vn1Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - vn2Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - rName := fmt.Sprintf("tf-test-route-%d", acctest.RandInt()) + resourceName := "aws_appmesh_route.test" + rName := fmt.Sprintf("tf-testacc-appmesh-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAppmeshRouteDestroy, + CheckDestroy: testAccCheckAwsAppmeshRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshRouteConfig_tcpRoute(meshName, vrName, vn1Name, vn2Name, rName), + Config: testAccAwsAppmeshRouteConfig_tcpRoute(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - resource.TestCheckResourceAttr(resourceName, "virtual_router_name", vrName), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.0.action.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.0.action.0.weighted_target.#", "1"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:appmesh:[^:]+:\\d{12}:mesh/%s/virtualRouter/%s/route/%s", meshName, vrName, rName))), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { - Config: testAccAppmeshRouteConfig_tcpRouteUpdated(meshName, vrName, vn1Name, vn2Name, rName), + Config: testAccAwsAppmeshRouteConfig_tcpRouteUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "mesh_name", meshName), - resource.TestCheckResourceAttr(resourceName, "virtual_router_name", vrName), resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), resource.TestCheckResourceAttr(resourceName, 
"spec.0.tcp_route.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.0.action.#", "1"), resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.0.action.0.weighted_target.#", "2"), - resource.TestCheckResourceAttrSet(resourceName, "created_date"), - resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:appmesh:[^:]+:\\d{12}:mesh/%s/virtualRouter/%s/route/%s", meshName, vrName, rName))), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s/%s", meshName, vrName, rName), + ImportStateIdFunc: testAccAwsAppmeshRouteImportStateIdFunc(resourceName), ImportState: true, ImportStateVerify: true, }, @@ -216,55 +309,141 @@ func testAccAwsAppmeshRoute_tcpRoute(t *testing.T) { }) } -func testAccAwsAppmeshRoute_tags(t *testing.T) { +func testAccAwsAppmeshRoute_routePriority(t *testing.T) { var r appmesh.RouteData - resourceName := "aws_appmesh_route.foo" - meshName := fmt.Sprintf("tf-test-mesh-%d", acctest.RandInt()) - vrName := fmt.Sprintf("tf-test-router-%d", acctest.RandInt()) - vn1Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - vn2Name := fmt.Sprintf("tf-test-node-%d", acctest.RandInt()) - rName := fmt.Sprintf("tf-test-route-%d", acctest.RandInt()) + resourceName := "aws_appmesh_route.test" + rName := fmt.Sprintf("tf-testacc-appmesh-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAppmeshRouteDestroy, + CheckDestroy: testAccCheckAwsAppmeshRouteDestroy, Steps: []resource.TestStep{ { - Config: testAccAppmeshRouteConfigWithTags(meshName, vrName, vn1Name, vn2Name, rName, "foo", "bar", "good", "bad"), + Config: 
testAccAwsAppmeshRouteConfig_routePriority(rName, 42), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), - resource.TestCheckResourceAttr( - resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr( - resourceName, "tags.foo", "bar"), - resource.TestCheckResourceAttr( - resourceName, "tags.good", "bad"), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "42"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { - Config: testAccAppmeshRouteConfigWithTags(meshName, vrName, vn1Name, vn2Name, rName, "foo2", "bar", 
"good", "bad2"), + Config: testAccAwsAppmeshRouteConfig_routePriority(rName, 1000), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), - resource.TestCheckResourceAttr( - resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr( - resourceName, "tags.foo2", "bar"), - resource.TestCheckResourceAttr( - resourceName, "tags.good", "bad2"), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "1000"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { - Config: testAccAppmeshRouteConfig_httpRoute(meshName, vrName, vn1Name, 
vn2Name, rName), + ResourceName: resourceName, + ImportStateIdFunc: testAccAwsAppmeshRouteImportStateIdFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAwsAppmeshRoute_tags(t *testing.T) { + var r appmesh.RouteData + resourceName := "aws_appmesh_route.test" + rName := fmt.Sprintf("tf-testacc-appmesh-%s", acctest.RandStringFromCharSet(13, acctest.CharSetAlphaNum)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsAppmeshRouteDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsAppmeshRouteConfig_tags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, 
"spec.0.priority", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2a"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), + ), + }, + { + Config: testAccAwsAppmeshRouteConfig_tagsUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAppmeshRouteExists(resourceName, &r), - resource.TestCheckResourceAttr( - resourceName, "tags.%", "0"), + testAccCheckAwsAppmeshRouteExists(resourceName, &r), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "appmesh", fmt.Sprintf("mesh/%s/virtualRouter/%s/route/%s", rName, rName, rName)), + resource.TestCheckResourceAttrSet(resourceName, "created_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_date"), + resource.TestCheckResourceAttr(resourceName, "mesh_name", rName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "spec.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.action.0.weighted_target.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.#", "1"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.header.#", "0"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.method", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.prefix", "/"), + resource.TestCheckResourceAttr(resourceName, "spec.0.http_route.0.match.0.scheme", ""), + resource.TestCheckResourceAttr(resourceName, "spec.0.priority", "0"), + 
resource.TestCheckResourceAttr(resourceName, "spec.0.tcp_route.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2b"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), + resource.TestCheckResourceAttr(resourceName, "virtual_router_name", rName), ), }, { ResourceName: resourceName, - ImportStateId: fmt.Sprintf("%s/%s/%s", meshName, vrName, rName), + ImportStateIdFunc: testAccAwsAppmeshRouteImportStateIdFunc(resourceName), ImportState: true, ImportStateVerify: true, }, @@ -272,7 +451,7 @@ func testAccAwsAppmeshRoute_tags(t *testing.T) { }) } -func testAccCheckAppmeshRouteDestroy(s *terraform.State) error { +func testAccCheckAwsAppmeshRouteDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).appmeshconn for _, rs := range s.RootModule().Resources { @@ -297,7 +476,7 @@ func testAccCheckAppmeshRouteDestroy(s *terraform.State) error { return nil } -func testAccCheckAppmeshRouteExists(name string, v *appmesh.RouteData) resource.TestCheckFunc { +func testAccCheckAwsAppmeshRouteExists(name string, v *appmesh.RouteData) resource.TestCheckFunc { return func(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).appmeshconn @@ -324,49 +503,54 @@ func testAccCheckAppmeshRouteExists(name string, v *appmesh.RouteData) resource. 
} } -func testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name string) string { - return fmt.Sprintf(` -resource "aws_appmesh_mesh" "foo" { - name = %[1]q -} - -resource "aws_appmesh_virtual_router" "foo" { - name = %[2]q - mesh_name = "${aws_appmesh_mesh.foo.id}" - - spec { - listener { - port_mapping { - port = 8080 - protocol = "http" - } +func testAccAwsAppmeshRouteImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not Found: %s", resourceName) } + + return fmt.Sprintf("%s/%s/%s", rs.Primary.Attributes["mesh_name"], rs.Primary.Attributes["virtual_router_name"], rs.Primary.Attributes["name"]), nil } } -resource "aws_appmesh_virtual_node" "foo" { - name = %[3]q - mesh_name = "${aws_appmesh_mesh.foo.id}" +func testAccAwsAppmeshRouteConfig_base(rName string) string { + return fmt.Sprintf(` +resource "aws_appmesh_mesh" "test" { + name = %[1]q +} + +resource "aws_appmesh_virtual_router" "test" { + name = %[1]q + mesh_name = "${aws_appmesh_mesh.test.id}" - spec {} + spec { + listener { + port_mapping { + port = 8080 + protocol = "http" + } + } + } } -resource "aws_appmesh_virtual_node" "bar" { - name = %[4]q - mesh_name = "${aws_appmesh_mesh.foo.id}" +resource "aws_appmesh_virtual_node" "test" { + count = 2 - spec {} + name = "%[1]s-${count.index}" + mesh_name = "${aws_appmesh_mesh.test.id}" + + spec {} } -`, meshName, vrName, vn1Name, vn2Name) +`, rName) } -func testAccAppmeshRouteConfig_httpRoute(meshName, vrName, vn1Name, vn2Name, rName string) string { - return testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name) + fmt.Sprintf(` - -resource "aws_appmesh_route" "foo" { +func testAccAwsAppmeshRouteConfig_httpRoute(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { name = %[1]q - mesh_name = 
"${aws_appmesh_mesh.foo.id}" - virtual_router_name = "${aws_appmesh_virtual_router.foo.name}" + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" spec { http_route { @@ -376,7 +560,7 @@ resource "aws_appmesh_route" "foo" { action { weighted_target { - virtual_node = "${aws_appmesh_virtual_node.foo.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" weight = 100 } } @@ -386,13 +570,12 @@ resource "aws_appmesh_route" "foo" { `, rName) } -func testAccAppmeshRouteConfig_httpRouteUpdated(meshName, vrName, vn1Name, vn2Name, rName string) string { - return testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name) + fmt.Sprintf(` - -resource "aws_appmesh_route" "foo" { +func testAccAwsAppmeshRouteConfig_httpRouteUpdated(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { name = %[1]q - mesh_name = "${aws_appmesh_mesh.foo.id}" - virtual_router_name = "${aws_appmesh_virtual_router.foo.name}" + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" spec { http_route { @@ -402,12 +585,12 @@ resource "aws_appmesh_route" "foo" { action { weighted_target { - virtual_node = "${aws_appmesh_virtual_node.foo.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" weight = 90 } weighted_target { - virtual_node = "${aws_appmesh_virtual_node.bar.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[1]}" weight = 10 } } @@ -417,19 +600,71 @@ resource "aws_appmesh_route" "foo" { `, rName) } -func testAccAppmeshRouteConfig_tcpRoute(meshName, vrName, vn1Name, vn2Name, rName string) string { - return testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name) + fmt.Sprintf(` +func testAccAwsAppmeshRouteConfig_httpHeader(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { + name = 
%[1]q + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" -resource "aws_appmesh_route" "foo" { + spec { + http_route { + match { + prefix = "/" + method = "POST" + scheme = "http" + + header { + name = "X-Testing1" + } + } + + action { + weighted_target { + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" + weight = 100 + } + } + } + } +} +`, rName) +} + +func testAccAwsAppmeshRouteConfig_httpHeaderUpdated(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { name = %[1]q - mesh_name = "${aws_appmesh_mesh.foo.id}" - virtual_router_name = "${aws_appmesh_virtual_router.foo.name}" + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" spec { - tcp_route { + http_route { + match { + prefix = "/path" + method = "PUT" + scheme = "https" + + header { + name = "X-Testing1" + invert = true + } + header { + name = "X-Testing2" + invert = false + + match { + range { + start = 2 + end = 7 + } + } + } + } + action { weighted_target { - virtual_node = "${aws_appmesh_virtual_node.foo.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" weight = 100 } } @@ -439,24 +674,44 @@ resource "aws_appmesh_route" "foo" { `, rName) } -func testAccAppmeshRouteConfig_tcpRouteUpdated(meshName, vrName, vn1Name, vn2Name, rName string) string { - return testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name) + fmt.Sprintf(` +func testAccAwsAppmeshRouteConfig_tcpRoute(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { + name = %[1]q + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" + + spec { + tcp_route { + action { + weighted_target { + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" + weight = 100 + } + } + } + } +} +`, rName) 
+} -resource "aws_appmesh_route" "foo" { +func testAccAwsAppmeshRouteConfig_tcpRouteUpdated(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { name = %[1]q - mesh_name = "${aws_appmesh_mesh.foo.id}" - virtual_router_name = "${aws_appmesh_virtual_router.foo.name}" + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" spec { tcp_route { action { weighted_target { - virtual_node = "${aws_appmesh_virtual_node.foo.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" weight = 90 } weighted_target { - virtual_node = "${aws_appmesh_virtual_node.bar.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[1]}" weight = 10 } } @@ -466,13 +721,70 @@ resource "aws_appmesh_route" "foo" { `, rName) } -func testAccAppmeshRouteConfigWithTags(meshName, vrName, vn1Name, vn2Name, rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return testAccAppmeshRouteConfigBase(meshName, vrName, vn1Name, vn2Name) + fmt.Sprintf(` +func testAccAwsAppmeshRouteConfig_routePriority(rName string, priority int) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { + name = %[1]q + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" -resource "aws_appmesh_route" "foo" { + spec { + http_route { + match { + prefix = "/" + } + + action { + weighted_target { + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" + weight = 100 + } + } + } + + priority = %d + } +} +`, rName, priority) +} + +func testAccAwsAppmeshRouteConfig_tags(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { + name = %[1]q + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" + + spec { + http_route { + match { + prefix 
= "/" + } + + action { + weighted_target { + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" + weight = 100 + } + } + } + } + + tags = { + Name = %[1]q + Key1 = "Value1" + Key2 = "Value2a" + } +} +`, rName) +} + +func testAccAwsAppmeshRouteConfig_tagsUpdated(rName string) string { + return testAccAwsAppmeshRouteConfig_base(rName) + fmt.Sprintf(` +resource "aws_appmesh_route" "test" { name = %[1]q - mesh_name = "${aws_appmesh_mesh.foo.id}" - virtual_router_name = "${aws_appmesh_virtual_router.foo.name}" + mesh_name = "${aws_appmesh_mesh.test.id}" + virtual_router_name = "${aws_appmesh_virtual_router.test.name}" spec { http_route { @@ -482,7 +794,7 @@ resource "aws_appmesh_route" "foo" { action { weighted_target { - virtual_node = "${aws_appmesh_virtual_node.foo.name}" + virtual_node = "${aws_appmesh_virtual_node.test.*.name[0]}" weight = 100 } } @@ -490,9 +802,10 @@ resource "aws_appmesh_route" "foo" { } tags = { - %[2]s = %[3]q - %[4]s = %[5]q + Name = %[1]q + Key2 = "Value2b" + Key3 = "Value3" } } -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +`, rName) } diff --git a/aws/resource_aws_appmesh_test.go b/aws/resource_aws_appmesh_test.go index bc26eadbc17..86bb8a44f76 100644 --- a/aws/resource_aws_appmesh_test.go +++ b/aws/resource_aws_appmesh_test.go @@ -12,9 +12,11 @@ func TestAccAWSAppmesh(t *testing.T) { "tags": testAccAwsAppmeshMesh_tags, }, "Route": { - "httpRoute": testAccAwsAppmeshRoute_httpRoute, - "tcpRoute": testAccAwsAppmeshRoute_tcpRoute, - "tags": testAccAwsAppmeshRoute_tags, + "httpHeader": testAccAwsAppmeshRoute_httpHeader, + "httpRoute": testAccAwsAppmeshRoute_httpRoute, + "tcpRoute": testAccAwsAppmeshRoute_tcpRoute, + "routePriority": testAccAwsAppmeshRoute_routePriority, + "tags": testAccAwsAppmeshRoute_tags, }, "VirtualNode": { "basic": testAccAwsAppmeshVirtualNode_basic, diff --git a/aws/resource_aws_appmesh_virtual_node.go b/aws/resource_aws_appmesh_virtual_node.go index 6ff75998551..ab8b3753adf 100644 --- 
a/aws/resource_aws_appmesh_virtual_node.go +++ b/aws/resource_aws_appmesh_virtual_node.go @@ -28,14 +28,29 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { MigrateState: resourceAwsAppmeshVirtualNodeMigrateState, Schema: map[string]*schema.Schema{ - "name": { + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "created_date": { + Type: schema.TypeString, + Computed: true, + }, + + "last_updated_date": { + Type: schema.TypeString, + Computed: true, + }, + + "mesh_name": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 255), }, - "mesh_name": { + "name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -49,14 +64,6 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "backends": { - Type: schema.TypeSet, - Removed: "Use `backend` configuration blocks instead", - Optional: true, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "backend": { Type: schema.TypeSet, Optional: true, @@ -81,7 +88,15 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, - Set: appmeshVirtualNodeBackendHash, + Set: appmeshBackendHash, + }, + + "backends": { + Type: schema.TypeSet, + Removed: "Use `backend` configuration blocks instead", + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "listener": { @@ -172,7 +187,7 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, }, - Set: appmeshVirtualNodeListenerHash, + Set: appmeshListenerHash, }, "logging": { @@ -254,18 +269,18 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { ConflictsWith: []string{"spec.0.service_discovery.0.aws_cloud_map"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.NoZeroValues, + }, + "service_name": { Type: schema.TypeString, Removed: "Use `hostname` argument 
instead", Optional: true, Computed: true, }, - - "hostname": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.NoZeroValues, - }, }, }, }, @@ -276,21 +291,6 @@ func resourceAwsAppmeshVirtualNode() *schema.Resource { }, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, - - "created_date": { - Type: schema.TypeString, - Computed: true, - }, - - "last_updated_date": { - Type: schema.TypeString, - Computed: true, - }, - "tags": tagsSchema(), }, } @@ -301,12 +301,12 @@ func resourceAwsAppmeshVirtualNodeCreate(d *schema.ResourceData, meta interface{ req := &appmesh.CreateVirtualNodeInput{ MeshName: aws.String(d.Get("mesh_name").(string)), - VirtualNodeName: aws.String(d.Get("name").(string)), Spec: expandAppmeshVirtualNodeSpec(d.Get("spec").([]interface{})), Tags: tagsFromMapAppmesh(d.Get("tags").(map[string]interface{})), + VirtualNodeName: aws.String(d.Get("name").(string)), } - log.Printf("[DEBUG] Creating App Mesh virtual node: %#v", req) + log.Printf("[DEBUG] Creating App Mesh virtual node: %s", req) resp, err := conn.CreateVirtualNode(req) if err != nil { return fmt.Errorf("error creating App Mesh virtual node: %s", err) @@ -330,7 +330,7 @@ func resourceAwsAppmeshVirtualNodeRead(d *schema.ResourceData, meta interface{}) return nil } if err != nil { - return fmt.Errorf("error reading App Mesh virtual node: %s", err) + return fmt.Errorf("error reading App Mesh virtual node (%s): %s", d.Id(), err) } if aws.StringValue(resp.VirtualNode.Status.Status) == appmesh.VirtualNodeStatusCodeDeleted { log.Printf("[WARN] App Mesh virtual node (%s) not found, removing from state", d.Id()) @@ -338,11 +338,11 @@ func resourceAwsAppmeshVirtualNodeRead(d *schema.ResourceData, meta interface{}) return nil } - d.Set("name", resp.VirtualNode.VirtualNodeName) - d.Set("mesh_name", resp.VirtualNode.MeshName) d.Set("arn", resp.VirtualNode.Metadata.Arn) d.Set("created_date", resp.VirtualNode.Metadata.CreatedAt.Format(time.RFC3339)) 
d.Set("last_updated_date", resp.VirtualNode.Metadata.LastUpdatedAt.Format(time.RFC3339)) + d.Set("mesh_name", resp.VirtualNode.MeshName) + d.Set("name", resp.VirtualNode.VirtualNodeName) err = d.Set("spec", flattenAppmeshVirtualNodeSpec(resp.VirtualNode.Spec)) if err != nil { return fmt.Errorf("error setting spec: %s", err) @@ -368,14 +368,14 @@ func resourceAwsAppmeshVirtualNodeUpdate(d *schema.ResourceData, meta interface{ _, v := d.GetChange("spec") req := &appmesh.UpdateVirtualNodeInput{ MeshName: aws.String(d.Get("mesh_name").(string)), - VirtualNodeName: aws.String(d.Get("name").(string)), Spec: expandAppmeshVirtualNodeSpec(v.([]interface{})), + VirtualNodeName: aws.String(d.Get("name").(string)), } - log.Printf("[DEBUG] Updating App Mesh virtual node: %#v", req) + log.Printf("[DEBUG] Updating App Mesh virtual node: %s", req) _, err := conn.UpdateVirtualNode(req) if err != nil { - return fmt.Errorf("error updating App Mesh virtual node: %s", err) + return fmt.Errorf("error updating App Mesh virtual node (%s): %s", d.Id(), err) } } @@ -404,7 +404,7 @@ func resourceAwsAppmeshVirtualNodeDelete(d *schema.ResourceData, meta interface{ return nil } if err != nil { - return fmt.Errorf("error deleting App Mesh virtual node: %s", err) + return fmt.Errorf("error deleting App Mesh virtual node (%s): %s", d.Id(), err) } return nil @@ -431,13 +431,274 @@ func resourceAwsAppmeshVirtualNodeImport(d *schema.ResourceData, meta interface{ } d.SetId(aws.StringValue(resp.VirtualNode.Metadata.Uid)) - d.Set("name", resp.VirtualNode.VirtualNodeName) d.Set("mesh_name", resp.VirtualNode.MeshName) + d.Set("name", resp.VirtualNode.VirtualNodeName) return []*schema.ResourceData{d}, nil } -func appmeshVirtualNodeBackendHash(vBackend interface{}) int { +func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec { + spec := &appmesh.VirtualNodeSpec{} + + if len(vSpec) == 0 || vSpec[0] == nil { + // Empty Spec is allowed. 
+ return spec + } + mSpec := vSpec[0].(map[string]interface{}) + + if vBackends, ok := mSpec["backend"].(*schema.Set); ok && vBackends.Len() > 0 { + backends := []*appmesh.Backend{} + + for _, vBackend := range vBackends.List() { + backend := &appmesh.Backend{} + + mBackend := vBackend.(map[string]interface{}) + + if vVirtualService, ok := mBackend["virtual_service"].([]interface{}); ok && len(vVirtualService) > 0 && vVirtualService[0] != nil { + mVirtualService := vVirtualService[0].(map[string]interface{}) + + backend.VirtualService = &appmesh.VirtualServiceBackend{} + + if vVirtualServiceName, ok := mVirtualService["virtual_service_name"].(string); ok { + backend.VirtualService.VirtualServiceName = aws.String(vVirtualServiceName) + } + } + backends = append(backends, backend) + } + + spec.Backends = backends + } + + if vListeners, ok := mSpec["listener"].(*schema.Set); ok && vListeners.Len() > 0 { + listeners := []*appmesh.Listener{} + + for _, vListener := range vListeners.List() { + listener := &appmesh.Listener{} + + mListener := vListener.(map[string]interface{}) + + if vHealthCheck, ok := mListener["health_check"].([]interface{}); ok && len(vHealthCheck) > 0 && vHealthCheck[0] != nil { + mHealthCheck := vHealthCheck[0].(map[string]interface{}) + + listener.HealthCheck = &appmesh.HealthCheckPolicy{} + + if vHealthyThreshold, ok := mHealthCheck["healthy_threshold"].(int); ok && vHealthyThreshold > 0 { + listener.HealthCheck.HealthyThreshold = aws.Int64(int64(vHealthyThreshold)) + } + if vIntervalMillis, ok := mHealthCheck["interval_millis"].(int); ok && vIntervalMillis > 0 { + listener.HealthCheck.IntervalMillis = aws.Int64(int64(vIntervalMillis)) + } + if vPath, ok := mHealthCheck["path"].(string); ok && vPath != "" { + listener.HealthCheck.Path = aws.String(vPath) + } + if vPort, ok := mHealthCheck["port"].(int); ok && vPort > 0 { + listener.HealthCheck.Port = aws.Int64(int64(vPort)) + } + if vProtocol, ok := mHealthCheck["protocol"].(string); ok && 
vProtocol != "" { + listener.HealthCheck.Protocol = aws.String(vProtocol) + } + if vTimeoutMillis, ok := mHealthCheck["timeout_millis"].(int); ok && vTimeoutMillis > 0 { + listener.HealthCheck.TimeoutMillis = aws.Int64(int64(vTimeoutMillis)) + } + if vUnhealthyThreshold, ok := mHealthCheck["unhealthy_threshold"].(int); ok && vUnhealthyThreshold > 0 { + listener.HealthCheck.UnhealthyThreshold = aws.Int64(int64(vUnhealthyThreshold)) + } + } + + if vPortMapping, ok := mListener["port_mapping"].([]interface{}); ok && len(vPortMapping) > 0 && vPortMapping[0] != nil { + mPortMapping := vPortMapping[0].(map[string]interface{}) + + listener.PortMapping = &appmesh.PortMapping{} + + if vPort, ok := mPortMapping["port"].(int); ok && vPort > 0 { + listener.PortMapping.Port = aws.Int64(int64(vPort)) + } + if vProtocol, ok := mPortMapping["protocol"].(string); ok && vProtocol != "" { + listener.PortMapping.Protocol = aws.String(vProtocol) + } + } + + listeners = append(listeners, listener) + } + + spec.Listeners = listeners + } + + if vLogging, ok := mSpec["logging"].([]interface{}); ok && len(vLogging) > 0 && vLogging[0] != nil { + mLogging := vLogging[0].(map[string]interface{}) + + if vAccessLog, ok := mLogging["access_log"].([]interface{}); ok && len(vAccessLog) > 0 && vAccessLog[0] != nil { + mAccessLog := vAccessLog[0].(map[string]interface{}) + + if vFile, ok := mAccessLog["file"].([]interface{}); ok && len(vFile) > 0 && vFile[0] != nil { + mFile := vFile[0].(map[string]interface{}) + + if vPath, ok := mFile["path"].(string); ok && vPath != "" { + spec.Logging = &appmesh.Logging{ + AccessLog: &appmesh.AccessLog{ + File: &appmesh.FileAccessLog{ + Path: aws.String(vPath), + }, + }, + } + } + } + } + } + + if vServiceDiscovery, ok := mSpec["service_discovery"].([]interface{}); ok && len(vServiceDiscovery) > 0 && vServiceDiscovery[0] != nil { + spec.ServiceDiscovery = &appmesh.ServiceDiscovery{} + + mServiceDiscovery := vServiceDiscovery[0].(map[string]interface{}) + + if 
vAwsCloudMap, ok := mServiceDiscovery["aws_cloud_map"].([]interface{}); ok && len(vAwsCloudMap) > 0 && vAwsCloudMap[0] != nil { + spec.ServiceDiscovery.AwsCloudMap = &appmesh.AwsCloudMapServiceDiscovery{} + + mAwsCloudMap := vAwsCloudMap[0].(map[string]interface{}) + + if vAttributes, ok := mAwsCloudMap["attributes"].(map[string]interface{}); ok && len(vAttributes) > 0 { + attributes := []*appmesh.AwsCloudMapInstanceAttribute{} + + for k, v := range vAttributes { + attributes = append(attributes, &appmesh.AwsCloudMapInstanceAttribute{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + spec.ServiceDiscovery.AwsCloudMap.Attributes = attributes + } + if vNamespaceName, ok := mAwsCloudMap["namespace_name"].(string); ok && vNamespaceName != "" { + spec.ServiceDiscovery.AwsCloudMap.NamespaceName = aws.String(vNamespaceName) + } + if vServiceName, ok := mAwsCloudMap["service_name"].(string); ok && vServiceName != "" { + spec.ServiceDiscovery.AwsCloudMap.ServiceName = aws.String(vServiceName) + } + } + + if vDns, ok := mServiceDiscovery["dns"].([]interface{}); ok && len(vDns) > 0 && vDns[0] != nil { + mDns := vDns[0].(map[string]interface{}) + + if vHostname, ok := mDns["hostname"].(string); ok && vHostname != "" { + spec.ServiceDiscovery.Dns = &appmesh.DnsServiceDiscovery{ + Hostname: aws.String(vHostname), + } + } + } + } + + return spec +} + +func flattenAppmeshVirtualNodeSpec(spec *appmesh.VirtualNodeSpec) []interface{} { + if spec == nil { + return []interface{}{} + } + + mSpec := map[string]interface{}{} + + if spec.Backends != nil { + vBackends := []interface{}{} + + for _, backend := range spec.Backends { + mBackend := map[string]interface{}{} + + if backend.VirtualService != nil { + mVirtualService := map[string]interface{}{ + "virtual_service_name": aws.StringValue(backend.VirtualService.VirtualServiceName), + } + mBackend["virtual_service"] = []interface{}{mVirtualService} + } + + vBackends = append(vBackends, mBackend) + } + + mSpec["backend"] 
= schema.NewSet(appmeshBackendHash, vBackends) + } + + if spec.Listeners != nil { + vListeners := []interface{}{} + + for _, listener := range spec.Listeners { + mListener := map[string]interface{}{} + + if listener.HealthCheck != nil { + mHealthCheck := map[string]interface{}{ + "healthy_threshold": int(aws.Int64Value(listener.HealthCheck.HealthyThreshold)), + "interval_millis": int(aws.Int64Value(listener.HealthCheck.IntervalMillis)), + "path": aws.StringValue(listener.HealthCheck.Path), + "port": int(aws.Int64Value(listener.HealthCheck.Port)), + "protocol": aws.StringValue(listener.HealthCheck.Protocol), + "timeout_millis": int(aws.Int64Value(listener.HealthCheck.TimeoutMillis)), + "unhealthy_threshold": int(aws.Int64Value(listener.HealthCheck.UnhealthyThreshold)), + } + mListener["health_check"] = []interface{}{mHealthCheck} + } + + if listener.PortMapping != nil { + mPortMapping := map[string]interface{}{ + "port": int(aws.Int64Value(listener.PortMapping.Port)), + "protocol": aws.StringValue(listener.PortMapping.Protocol), + } + mListener["port_mapping"] = []interface{}{mPortMapping} + } + + vListeners = append(vListeners, mListener) + } + + mSpec["listener"] = schema.NewSet(appmeshListenerHash, vListeners) + } + + if spec.Logging != nil && spec.Logging.AccessLog != nil && spec.Logging.AccessLog.File != nil { + mSpec["logging"] = []interface{}{ + map[string]interface{}{ + "access_log": []interface{}{ + map[string]interface{}{ + "file": []interface{}{ + map[string]interface{}{ + "path": aws.StringValue(spec.Logging.AccessLog.File.Path), + }, + }, + }, + }, + }, + } + } + + if spec.ServiceDiscovery != nil { + mServiceDiscovery := map[string]interface{}{} + + if spec.ServiceDiscovery.AwsCloudMap != nil { + vAttributes := map[string]interface{}{} + + for _, attribute := range spec.ServiceDiscovery.AwsCloudMap.Attributes { + vAttributes[aws.StringValue(attribute.Key)] = aws.StringValue(attribute.Value) + } + + mServiceDiscovery["aws_cloud_map"] = []interface{}{ + 
map[string]interface{}{ + "attributes": vAttributes, + "namespace_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.NamespaceName), + "service_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.ServiceName), + }, + } + } + + if spec.ServiceDiscovery.Dns != nil { + mServiceDiscovery["dns"] = []interface{}{ + map[string]interface{}{ + "hostname": aws.StringValue(spec.ServiceDiscovery.Dns.Hostname), + }, + } + } + + mSpec["service_discovery"] = []interface{}{mServiceDiscovery} + } + + return []interface{}{mSpec} +} + +func appmeshBackendHash(vBackend interface{}) int { var buf bytes.Buffer mBackend := vBackend.(map[string]interface{}) if vVirtualService, ok := mBackend["virtual_service"].([]interface{}); ok && len(vVirtualService) > 0 && vVirtualService[0] != nil { @@ -449,7 +710,7 @@ func appmeshVirtualNodeBackendHash(vBackend interface{}) int { return hashcode.String(buf.String()) } -func appmeshVirtualNodeListenerHash(vListener interface{}) int { +func appmeshListenerHash(vListener interface{}) int { var buf bytes.Buffer mListener := vListener.(map[string]interface{}) if vHealthCheck, ok := mListener["health_check"].([]interface{}); ok && len(vHealthCheck) > 0 && vHealthCheck[0] != nil { diff --git a/aws/resource_aws_appmesh_virtual_node_migrate.go b/aws/resource_aws_appmesh_virtual_node_migrate.go index eb71da9efdd..312f2352c35 100644 --- a/aws/resource_aws_appmesh_virtual_node_migrate.go +++ b/aws/resource_aws_appmesh_virtual_node_migrate.go @@ -29,7 +29,7 @@ func migrateAppmeshVirtualNodeStateV0toV1(is *terraform.InstanceState) (*terrafo delete(is.Attributes, "spec.0.backends.#") for k, v := range is.Attributes { if strings.HasPrefix(k, "spec.0.backends.") { - hash := appmeshVirtualNodeBackendHash(map[string]interface{}{ + hash := appmeshBackendHash(map[string]interface{}{ "virtual_service": []interface{}{map[string]interface{}{ "virtual_service_name": v, }}, diff --git a/aws/resource_aws_appmesh_virtual_router.go 
b/aws/resource_aws_appmesh_virtual_router.go index c4a4f6188cc..131cf0d3573 100644 --- a/aws/resource_aws_appmesh_virtual_router.go +++ b/aws/resource_aws_appmesh_virtual_router.go @@ -257,6 +257,75 @@ func resourceAwsAppmeshVirtualRouterImport(d *schema.ResourceData, meta interfac return []*schema.ResourceData{d}, nil } +func expandAppmeshVirtualRouterSpec(vSpec []interface{}) *appmesh.VirtualRouterSpec { + spec := &appmesh.VirtualRouterSpec{} + + if len(vSpec) == 0 || vSpec[0] == nil { + // Empty Spec is allowed. + return spec + } + mSpec := vSpec[0].(map[string]interface{}) + + if vListeners, ok := mSpec["listener"].(*schema.Set); ok && vListeners.Len() > 0 { + listeners := []*appmesh.VirtualRouterListener{} + + for _, vListener := range vListeners.List() { + listener := &appmesh.VirtualRouterListener{} + + mListener := vListener.(map[string]interface{}) + + if vPortMapping, ok := mListener["port_mapping"].([]interface{}); ok && len(vPortMapping) > 0 && vPortMapping[0] != nil { + mPortMapping := vPortMapping[0].(map[string]interface{}) + + listener.PortMapping = &appmesh.PortMapping{} + + if vPort, ok := mPortMapping["port"].(int); ok && vPort > 0 { + listener.PortMapping.Port = aws.Int64(int64(vPort)) + } + if vProtocol, ok := mPortMapping["protocol"].(string); ok && vProtocol != "" { + listener.PortMapping.Protocol = aws.String(vProtocol) + } + } + + listeners = append(listeners, listener) + } + + spec.Listeners = listeners + } + + return spec +} + +func flattenAppmeshVirtualRouterSpec(spec *appmesh.VirtualRouterSpec) []interface{} { + if spec == nil { + return []interface{}{} + } + + mSpec := map[string]interface{}{} + + if spec.Listeners != nil { + vListeners := []interface{}{} + + for _, listener := range spec.Listeners { + mListener := map[string]interface{}{} + + if listener.PortMapping != nil { + mPortMapping := map[string]interface{}{ + "port": int(aws.Int64Value(listener.PortMapping.Port)), + "protocol": aws.StringValue(listener.PortMapping.Protocol), 
+ } + mListener["port_mapping"] = []interface{}{mPortMapping} + } + + vListeners = append(vListeners, mListener) + } + + mSpec["listener"] = schema.NewSet(appmeshVirtualRouterListenerHash, vListeners) + } + + return []interface{}{mSpec} +} + func appmeshVirtualRouterListenerHash(vListener interface{}) int { var buf bytes.Buffer mListener := vListener.(map[string]interface{}) diff --git a/aws/resource_aws_appmesh_virtual_service.go b/aws/resource_aws_appmesh_virtual_service.go index 9ac2fbba396..c49350e4a34 100644 --- a/aws/resource_aws_appmesh_virtual_service.go +++ b/aws/resource_aws_appmesh_virtual_service.go @@ -251,3 +251,73 @@ func resourceAwsAppmeshVirtualServiceImport(d *schema.ResourceData, meta interfa return []*schema.ResourceData{d}, nil } + +func expandAppmeshVirtualServiceSpec(vSpec []interface{}) *appmesh.VirtualServiceSpec { + spec := &appmesh.VirtualServiceSpec{} + + if len(vSpec) == 0 || vSpec[0] == nil { + // Empty Spec is allowed. + return spec + } + mSpec := vSpec[0].(map[string]interface{}) + + if vProvider, ok := mSpec["provider"].([]interface{}); ok && len(vProvider) > 0 && vProvider[0] != nil { + mProvider := vProvider[0].(map[string]interface{}) + + spec.Provider = &appmesh.VirtualServiceProvider{} + + if vVirtualNode, ok := mProvider["virtual_node"].([]interface{}); ok && len(vVirtualNode) > 0 && vVirtualNode[0] != nil { + mVirtualNode := vVirtualNode[0].(map[string]interface{}) + + if vVirtualNodeName, ok := mVirtualNode["virtual_node_name"].(string); ok && vVirtualNodeName != "" { + spec.Provider.VirtualNode = &appmesh.VirtualNodeServiceProvider{ + VirtualNodeName: aws.String(vVirtualNodeName), + } + } + } + + if vVirtualRouter, ok := mProvider["virtual_router"].([]interface{}); ok && len(vVirtualRouter) > 0 && vVirtualRouter[0] != nil { + mVirtualRouter := vVirtualRouter[0].(map[string]interface{}) + + if vVirtualRouterName, ok := mVirtualRouter["virtual_router_name"].(string); ok && vVirtualRouterName != "" { + 
spec.Provider.VirtualRouter = &appmesh.VirtualRouterServiceProvider{ + VirtualRouterName: aws.String(vVirtualRouterName), + } + } + } + } + + return spec +} + +func flattenAppmeshVirtualServiceSpec(spec *appmesh.VirtualServiceSpec) []interface{} { + if spec == nil { + return []interface{}{} + } + + mSpec := map[string]interface{}{} + + if spec.Provider != nil { + mProvider := map[string]interface{}{} + + if spec.Provider.VirtualNode != nil { + mProvider["virtual_node"] = []interface{}{ + map[string]interface{}{ + "virtual_node_name": aws.StringValue(spec.Provider.VirtualNode.VirtualNodeName), + }, + } + } + + if spec.Provider.VirtualRouter != nil { + mProvider["virtual_router"] = []interface{}{ + map[string]interface{}{ + "virtual_router_name": aws.StringValue(spec.Provider.VirtualRouter.VirtualRouterName), + }, + } + } + + mSpec["provider"] = []interface{}{mProvider} + } + + return []interface{}{mSpec} +} diff --git a/aws/structure.go b/aws/structure.go index db19625ed74..080046bf8d3 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" "github.com/aws/aws-sdk-go/service/apigateway" - "github.com/aws/aws-sdk-go/service/appmesh" "github.com/aws/aws-sdk-go/service/appsync" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudformation" @@ -4828,602 +4827,6 @@ func flattenRdsScalingConfigurationInfo(scalingConfigurationInfo *rds.ScalingCon return []interface{}{m} } -func expandAppmeshMeshSpec(vSpec []interface{}) *appmesh.MeshSpec { - spec := &appmesh.MeshSpec{} - - if len(vSpec) == 0 || vSpec[0] == nil { - // Empty Spec is allowed. 
- return spec - } - mSpec := vSpec[0].(map[string]interface{}) - - if vEgressFilter, ok := mSpec["egress_filter"].([]interface{}); ok && len(vEgressFilter) > 0 && vEgressFilter[0] != nil { - mEgressFilter := vEgressFilter[0].(map[string]interface{}) - - if vType, ok := mEgressFilter["type"].(string); ok && vType != "" { - spec.EgressFilter = &appmesh.EgressFilter{ - Type: aws.String(vType), - } - } - } - - return spec -} - -func flattenAppmeshMeshSpec(spec *appmesh.MeshSpec) []interface{} { - if spec == nil { - return []interface{}{} - } - - mSpec := map[string]interface{}{} - - if spec.EgressFilter != nil { - mSpec["egress_filter"] = []interface{}{ - map[string]interface{}{ - "type": aws.StringValue(spec.EgressFilter.Type), - }, - } - } - - return []interface{}{mSpec} -} - -func expandAppmeshVirtualRouterSpec(vSpec []interface{}) *appmesh.VirtualRouterSpec { - spec := &appmesh.VirtualRouterSpec{} - - if len(vSpec) == 0 || vSpec[0] == nil { - // Empty Spec is allowed. - return spec - } - mSpec := vSpec[0].(map[string]interface{}) - - if vListeners, ok := mSpec["listener"].(*schema.Set); ok && vListeners.Len() > 0 { - listeners := []*appmesh.VirtualRouterListener{} - - for _, vListener := range vListeners.List() { - listener := &appmesh.VirtualRouterListener{} - - mListener := vListener.(map[string]interface{}) - - if vPortMapping, ok := mListener["port_mapping"].([]interface{}); ok && len(vPortMapping) > 0 && vPortMapping[0] != nil { - mPortMapping := vPortMapping[0].(map[string]interface{}) - - listener.PortMapping = &appmesh.PortMapping{} - - if vPort, ok := mPortMapping["port"].(int); ok && vPort > 0 { - listener.PortMapping.Port = aws.Int64(int64(vPort)) - } - if vProtocol, ok := mPortMapping["protocol"].(string); ok && vProtocol != "" { - listener.PortMapping.Protocol = aws.String(vProtocol) - } - } - - listeners = append(listeners, listener) - } - - spec.Listeners = listeners - } - - return spec -} - -func flattenAppmeshVirtualRouterSpec(spec 
*appmesh.VirtualRouterSpec) []interface{} { - if spec == nil { - return []interface{}{} - } - - mSpec := map[string]interface{}{} - - if spec.Listeners != nil { - vListeners := []interface{}{} - - for _, listener := range spec.Listeners { - mListener := map[string]interface{}{} - - if listener.PortMapping != nil { - mPortMapping := map[string]interface{}{ - "port": int(aws.Int64Value(listener.PortMapping.Port)), - "protocol": aws.StringValue(listener.PortMapping.Protocol), - } - mListener["port_mapping"] = []interface{}{mPortMapping} - } - - vListeners = append(vListeners, mListener) - } - - mSpec["listener"] = schema.NewSet(appmeshVirtualNodeListenerHash, vListeners) - } - - return []interface{}{mSpec} -} - -func expandAppmeshVirtualNodeSpec(vSpec []interface{}) *appmesh.VirtualNodeSpec { - spec := &appmesh.VirtualNodeSpec{} - - if len(vSpec) == 0 || vSpec[0] == nil { - // Empty Spec is allowed. - return spec - } - mSpec := vSpec[0].(map[string]interface{}) - - if vBackends, ok := mSpec["backend"].(*schema.Set); ok && vBackends.Len() > 0 { - backends := []*appmesh.Backend{} - - for _, vBackend := range vBackends.List() { - backend := &appmesh.Backend{} - - mBackend := vBackend.(map[string]interface{}) - - if vVirtualService, ok := mBackend["virtual_service"].([]interface{}); ok && len(vVirtualService) > 0 && vVirtualService[0] != nil { - mVirtualService := vVirtualService[0].(map[string]interface{}) - - backend.VirtualService = &appmesh.VirtualServiceBackend{} - - if vVirtualServiceName, ok := mVirtualService["virtual_service_name"].(string); ok { - backend.VirtualService.VirtualServiceName = aws.String(vVirtualServiceName) - } - } - backends = append(backends, backend) - } - - spec.Backends = backends - } - - if vListeners, ok := mSpec["listener"].(*schema.Set); ok && vListeners.Len() > 0 { - listeners := []*appmesh.Listener{} - - for _, vListener := range vListeners.List() { - listener := &appmesh.Listener{} - - mListener := vListener.(map[string]interface{}) - 
- if vHealthCheck, ok := mListener["health_check"].([]interface{}); ok && len(vHealthCheck) > 0 && vHealthCheck[0] != nil { - mHealthCheck := vHealthCheck[0].(map[string]interface{}) - - listener.HealthCheck = &appmesh.HealthCheckPolicy{} - - if vHealthyThreshold, ok := mHealthCheck["healthy_threshold"].(int); ok && vHealthyThreshold > 0 { - listener.HealthCheck.HealthyThreshold = aws.Int64(int64(vHealthyThreshold)) - } - if vIntervalMillis, ok := mHealthCheck["interval_millis"].(int); ok && vIntervalMillis > 0 { - listener.HealthCheck.IntervalMillis = aws.Int64(int64(vIntervalMillis)) - } - if vPath, ok := mHealthCheck["path"].(string); ok && vPath != "" { - listener.HealthCheck.Path = aws.String(vPath) - } - if vPort, ok := mHealthCheck["port"].(int); ok && vPort > 0 { - listener.HealthCheck.Port = aws.Int64(int64(vPort)) - } - if vProtocol, ok := mHealthCheck["protocol"].(string); ok && vProtocol != "" { - listener.HealthCheck.Protocol = aws.String(vProtocol) - } - if vTimeoutMillis, ok := mHealthCheck["timeout_millis"].(int); ok && vTimeoutMillis > 0 { - listener.HealthCheck.TimeoutMillis = aws.Int64(int64(vTimeoutMillis)) - } - if vUnhealthyThreshold, ok := mHealthCheck["unhealthy_threshold"].(int); ok && vUnhealthyThreshold > 0 { - listener.HealthCheck.UnhealthyThreshold = aws.Int64(int64(vUnhealthyThreshold)) - } - } - - if vPortMapping, ok := mListener["port_mapping"].([]interface{}); ok && len(vPortMapping) > 0 && vPortMapping[0] != nil { - mPortMapping := vPortMapping[0].(map[string]interface{}) - - listener.PortMapping = &appmesh.PortMapping{} - - if vPort, ok := mPortMapping["port"].(int); ok && vPort > 0 { - listener.PortMapping.Port = aws.Int64(int64(vPort)) - } - if vProtocol, ok := mPortMapping["protocol"].(string); ok && vProtocol != "" { - listener.PortMapping.Protocol = aws.String(vProtocol) - } - } - - listeners = append(listeners, listener) - } - - spec.Listeners = listeners - } - - if vLogging, ok := mSpec["logging"].([]interface{}); ok && 
len(vLogging) > 0 && vLogging[0] != nil { - mLogging := vLogging[0].(map[string]interface{}) - - if vAccessLog, ok := mLogging["access_log"].([]interface{}); ok && len(vAccessLog) > 0 && vAccessLog[0] != nil { - mAccessLog := vAccessLog[0].(map[string]interface{}) - - if vFile, ok := mAccessLog["file"].([]interface{}); ok && len(vFile) > 0 && vFile[0] != nil { - mFile := vFile[0].(map[string]interface{}) - - if vPath, ok := mFile["path"].(string); ok && vPath != "" { - spec.Logging = &appmesh.Logging{ - AccessLog: &appmesh.AccessLog{ - File: &appmesh.FileAccessLog{ - Path: aws.String(vPath), - }, - }, - } - } - } - } - } - - if vServiceDiscovery, ok := mSpec["service_discovery"].([]interface{}); ok && len(vServiceDiscovery) > 0 && vServiceDiscovery[0] != nil { - spec.ServiceDiscovery = &appmesh.ServiceDiscovery{} - - mServiceDiscovery := vServiceDiscovery[0].(map[string]interface{}) - - if vAwsCloudMap, ok := mServiceDiscovery["aws_cloud_map"].([]interface{}); ok && len(vAwsCloudMap) > 0 && vAwsCloudMap[0] != nil { - spec.ServiceDiscovery.AwsCloudMap = &appmesh.AwsCloudMapServiceDiscovery{} - - mAwsCloudMap := vAwsCloudMap[0].(map[string]interface{}) - - if vAttributes, ok := mAwsCloudMap["attributes"].(map[string]interface{}); ok && len(vAttributes) > 0 { - attributes := []*appmesh.AwsCloudMapInstanceAttribute{} - - for k, v := range vAttributes { - attributes = append(attributes, &appmesh.AwsCloudMapInstanceAttribute{ - Key: aws.String(k), - Value: aws.String(v.(string)), - }) - } - - spec.ServiceDiscovery.AwsCloudMap.Attributes = attributes - } - if vNamespaceName, ok := mAwsCloudMap["namespace_name"].(string); ok && vNamespaceName != "" { - spec.ServiceDiscovery.AwsCloudMap.NamespaceName = aws.String(vNamespaceName) - } - if vServiceName, ok := mAwsCloudMap["service_name"].(string); ok && vServiceName != "" { - spec.ServiceDiscovery.AwsCloudMap.ServiceName = aws.String(vServiceName) - } - } - - if vDns, ok := mServiceDiscovery["dns"].([]interface{}); ok && 
len(vDns) > 0 && vDns[0] != nil { - mDns := vDns[0].(map[string]interface{}) - - if vHostname, ok := mDns["hostname"].(string); ok && vHostname != "" { - spec.ServiceDiscovery.Dns = &appmesh.DnsServiceDiscovery{ - Hostname: aws.String(vHostname), - } - } - } - } - - return spec -} - -func flattenAppmeshVirtualNodeSpec(spec *appmesh.VirtualNodeSpec) []interface{} { - if spec == nil { - return []interface{}{} - } - - mSpec := map[string]interface{}{} - - if spec.Backends != nil { - vBackends := []interface{}{} - - for _, backend := range spec.Backends { - mBackend := map[string]interface{}{} - - if backend.VirtualService != nil { - mVirtualService := map[string]interface{}{ - "virtual_service_name": aws.StringValue(backend.VirtualService.VirtualServiceName), - } - mBackend["virtual_service"] = []interface{}{mVirtualService} - } - - vBackends = append(vBackends, mBackend) - } - - mSpec["backend"] = schema.NewSet(appmeshVirtualNodeBackendHash, vBackends) - } - - if spec.Listeners != nil { - vListeners := []interface{}{} - - for _, listener := range spec.Listeners { - mListener := map[string]interface{}{} - - if listener.HealthCheck != nil { - mHealthCheck := map[string]interface{}{ - "healthy_threshold": int(aws.Int64Value(listener.HealthCheck.HealthyThreshold)), - "interval_millis": int(aws.Int64Value(listener.HealthCheck.IntervalMillis)), - "path": aws.StringValue(listener.HealthCheck.Path), - "port": int(aws.Int64Value(listener.HealthCheck.Port)), - "protocol": aws.StringValue(listener.HealthCheck.Protocol), - "timeout_millis": int(aws.Int64Value(listener.HealthCheck.TimeoutMillis)), - "unhealthy_threshold": int(aws.Int64Value(listener.HealthCheck.UnhealthyThreshold)), - } - mListener["health_check"] = []interface{}{mHealthCheck} - } - - if listener.PortMapping != nil { - mPortMapping := map[string]interface{}{ - "port": int(aws.Int64Value(listener.PortMapping.Port)), - "protocol": aws.StringValue(listener.PortMapping.Protocol), - } - mListener["port_mapping"] = 
[]interface{}{mPortMapping} - } - - vListeners = append(vListeners, mListener) - } - - mSpec["listener"] = schema.NewSet(appmeshVirtualNodeListenerHash, vListeners) - } - - if spec.Logging != nil && spec.Logging.AccessLog != nil && spec.Logging.AccessLog.File != nil { - mSpec["logging"] = []interface{}{ - map[string]interface{}{ - "access_log": []interface{}{ - map[string]interface{}{ - "file": []interface{}{ - map[string]interface{}{ - "path": aws.StringValue(spec.Logging.AccessLog.File.Path), - }, - }, - }, - }, - }, - } - } - - if spec.ServiceDiscovery != nil { - mServiceDiscovery := map[string]interface{}{} - - if spec.ServiceDiscovery.AwsCloudMap != nil { - vAttributes := map[string]interface{}{} - - for _, attribute := range spec.ServiceDiscovery.AwsCloudMap.Attributes { - vAttributes[aws.StringValue(attribute.Key)] = aws.StringValue(attribute.Value) - } - - mServiceDiscovery["aws_cloud_map"] = []interface{}{ - map[string]interface{}{ - "attributes": vAttributes, - "namespace_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.NamespaceName), - "service_name": aws.StringValue(spec.ServiceDiscovery.AwsCloudMap.ServiceName), - }, - } - } - - if spec.ServiceDiscovery.Dns != nil { - mServiceDiscovery["dns"] = []interface{}{ - map[string]interface{}{ - "hostname": aws.StringValue(spec.ServiceDiscovery.Dns.Hostname), - }, - } - } - - mSpec["service_discovery"] = []interface{}{mServiceDiscovery} - } - - return []interface{}{mSpec} -} - -func expandAppmeshVirtualServiceSpec(vSpec []interface{}) *appmesh.VirtualServiceSpec { - spec := &appmesh.VirtualServiceSpec{} - - if len(vSpec) == 0 || vSpec[0] == nil { - // Empty Spec is allowed. 
- return spec - } - mSpec := vSpec[0].(map[string]interface{}) - - if vProvider, ok := mSpec["provider"].([]interface{}); ok && len(vProvider) > 0 && vProvider[0] != nil { - mProvider := vProvider[0].(map[string]interface{}) - - spec.Provider = &appmesh.VirtualServiceProvider{} - - if vVirtualNode, ok := mProvider["virtual_node"].([]interface{}); ok && len(vVirtualNode) > 0 && vVirtualNode[0] != nil { - mVirtualNode := vVirtualNode[0].(map[string]interface{}) - - if vVirtualNodeName, ok := mVirtualNode["virtual_node_name"].(string); ok && vVirtualNodeName != "" { - spec.Provider.VirtualNode = &appmesh.VirtualNodeServiceProvider{ - VirtualNodeName: aws.String(vVirtualNodeName), - } - } - } - - if vVirtualRouter, ok := mProvider["virtual_router"].([]interface{}); ok && len(vVirtualRouter) > 0 && vVirtualRouter[0] != nil { - mVirtualRouter := vVirtualRouter[0].(map[string]interface{}) - - if vVirtualRouterName, ok := mVirtualRouter["virtual_router_name"].(string); ok && vVirtualRouterName != "" { - spec.Provider.VirtualRouter = &appmesh.VirtualRouterServiceProvider{ - VirtualRouterName: aws.String(vVirtualRouterName), - } - } - } - } - - return spec -} - -func flattenAppmeshVirtualServiceSpec(spec *appmesh.VirtualServiceSpec) []interface{} { - if spec == nil { - return []interface{}{} - } - - mSpec := map[string]interface{}{} - - if spec.Provider != nil { - mProvider := map[string]interface{}{} - - if spec.Provider.VirtualNode != nil { - mProvider["virtual_node"] = []interface{}{ - map[string]interface{}{ - "virtual_node_name": aws.StringValue(spec.Provider.VirtualNode.VirtualNodeName), - }, - } - } - - if spec.Provider.VirtualRouter != nil { - mProvider["virtual_router"] = []interface{}{ - map[string]interface{}{ - "virtual_router_name": aws.StringValue(spec.Provider.VirtualRouter.VirtualRouterName), - }, - } - } - - mSpec["provider"] = []interface{}{mProvider} - } - - return []interface{}{mSpec} -} - -func expandAppmeshRouteSpec(vSpec []interface{}) 
*appmesh.RouteSpec { - spec := &appmesh.RouteSpec{} - - if len(vSpec) == 0 || vSpec[0] == nil { - // Empty Spec is allowed. - return spec - } - mSpec := vSpec[0].(map[string]interface{}) - - if vHttpRoute, ok := mSpec["http_route"].([]interface{}); ok && len(vHttpRoute) > 0 && vHttpRoute[0] != nil { - mHttpRoute := vHttpRoute[0].(map[string]interface{}) - - spec.HttpRoute = &appmesh.HttpRoute{} - - if vHttpRouteAction, ok := mHttpRoute["action"].([]interface{}); ok && len(vHttpRouteAction) > 0 && vHttpRouteAction[0] != nil { - mHttpRouteAction := vHttpRouteAction[0].(map[string]interface{}) - - if vWeightedTargets, ok := mHttpRouteAction["weighted_target"].(*schema.Set); ok && vWeightedTargets.Len() > 0 { - weightedTargets := []*appmesh.WeightedTarget{} - - for _, vWeightedTarget := range vWeightedTargets.List() { - weightedTarget := &appmesh.WeightedTarget{} - - mWeightedTarget := vWeightedTarget.(map[string]interface{}) - - if vVirtualNode, ok := mWeightedTarget["virtual_node"].(string); ok && vVirtualNode != "" { - weightedTarget.VirtualNode = aws.String(vVirtualNode) - } - if vWeight, ok := mWeightedTarget["weight"].(int); ok { - weightedTarget.Weight = aws.Int64(int64(vWeight)) - } - - weightedTargets = append(weightedTargets, weightedTarget) - } - - spec.HttpRoute.Action = &appmesh.HttpRouteAction{ - WeightedTargets: weightedTargets, - } - } - } - - if vHttpRouteMatch, ok := mHttpRoute["match"].([]interface{}); ok && len(vHttpRouteMatch) > 0 && vHttpRouteMatch[0] != nil { - mHttpRouteMatch := vHttpRouteMatch[0].(map[string]interface{}) - - if vPrefix, ok := mHttpRouteMatch["prefix"].(string); ok && vPrefix != "" { - spec.HttpRoute.Match = &appmesh.HttpRouteMatch{ - Prefix: aws.String(vPrefix), - } - } - } - } - - if vTcpRoute, ok := mSpec["tcp_route"].([]interface{}); ok && len(vTcpRoute) > 0 && vTcpRoute[0] != nil { - mTcpRoute := vTcpRoute[0].(map[string]interface{}) - - spec.TcpRoute = &appmesh.TcpRoute{} - - if vTcpRouteAction, ok := 
mTcpRoute["action"].([]interface{}); ok && len(vTcpRouteAction) > 0 && vTcpRouteAction[0] != nil { - mTcpRouteAction := vTcpRouteAction[0].(map[string]interface{}) - - if vWeightedTargets, ok := mTcpRouteAction["weighted_target"].(*schema.Set); ok && vWeightedTargets.Len() > 0 { - weightedTargets := []*appmesh.WeightedTarget{} - - for _, vWeightedTarget := range vWeightedTargets.List() { - weightedTarget := &appmesh.WeightedTarget{} - - mWeightedTarget := vWeightedTarget.(map[string]interface{}) - - if vVirtualNode, ok := mWeightedTarget["virtual_node"].(string); ok && vVirtualNode != "" { - weightedTarget.VirtualNode = aws.String(vVirtualNode) - } - if vWeight, ok := mWeightedTarget["weight"].(int); ok { - weightedTarget.Weight = aws.Int64(int64(vWeight)) - } - - weightedTargets = append(weightedTargets, weightedTarget) - } - - spec.TcpRoute.Action = &appmesh.TcpRouteAction{ - WeightedTargets: weightedTargets, - } - } - } - } - - return spec -} - -func flattenAppmeshRouteSpec(spec *appmesh.RouteSpec) []interface{} { - if spec == nil { - return []interface{}{} - } - - mSpec := map[string]interface{}{} - - if spec.HttpRoute != nil { - mHttpRoute := map[string]interface{}{} - - if spec.HttpRoute.Action != nil && spec.HttpRoute.Action.WeightedTargets != nil { - vWeightedTargets := []interface{}{} - - for _, weightedTarget := range spec.HttpRoute.Action.WeightedTargets { - mWeightedTarget := map[string]interface{}{ - "virtual_node": aws.StringValue(weightedTarget.VirtualNode), - "weight": int(aws.Int64Value(weightedTarget.Weight)), - } - - vWeightedTargets = append(vWeightedTargets, mWeightedTarget) - } - - mHttpRoute["action"] = []interface{}{ - map[string]interface{}{ - "weighted_target": schema.NewSet(appmeshRouteWeightedTargetHash, vWeightedTargets), - }, - } - } - - if spec.HttpRoute.Match != nil { - mHttpRoute["match"] = []interface{}{ - map[string]interface{}{ - "prefix": aws.StringValue(spec.HttpRoute.Match.Prefix), - }, - } - } - - mSpec["http_route"] = 
[]interface{}{mHttpRoute} - } - - if spec.TcpRoute != nil { - mTcpRoute := map[string]interface{}{} - - if spec.TcpRoute.Action != nil && spec.TcpRoute.Action.WeightedTargets != nil { - vWeightedTargets := []interface{}{} - - for _, weightedTarget := range spec.TcpRoute.Action.WeightedTargets { - mWeightedTarget := map[string]interface{}{ - "virtual_node": aws.StringValue(weightedTarget.VirtualNode), - "weight": int(aws.Int64Value(weightedTarget.Weight)), - } - - vWeightedTargets = append(vWeightedTargets, mWeightedTarget) - } - - mTcpRoute["action"] = []interface{}{ - map[string]interface{}{ - "weighted_target": schema.NewSet(appmeshRouteWeightedTargetHash, vWeightedTargets), - }, - } - } - - mSpec["tcp_route"] = []interface{}{mTcpRoute} - } - - return []interface{}{mSpec} -} - func flattenAppsyncPipelineConfig(c *appsync.PipelineConfig) []interface{} { if c == nil { return nil diff --git a/go.mod b/go.mod index fcb34dd80b8..50218ac1be8 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/terraform-providers/terraform-provider-aws require ( - github.com/aws/aws-sdk-go v1.23.0 + github.com/aws/aws-sdk-go v1.23.8 github.com/beevik/etree v1.1.0 github.com/bflad/tfproviderlint v0.4.0 github.com/client9/misspell v0.3.4 diff --git a/go.sum b/go.sum index 3dbbcdba0df..e4eb3dc2fd2 100644 --- a/go.sum +++ b/go.sum @@ -54,8 +54,8 @@ github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.21.7 h1:ml+k7szyVaq4YD+3LhqOGl9tgMTqgMbpnuUSkB6UJvQ= github.com/aws/aws-sdk-go v1.21.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.0 h1:ilfJN/vJtFo1XDFxB2YMBYGeOvGZl6Qow17oyD4+Z9A= -github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk= +github.com/aws/aws-sdk-go v1.23.8/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index c2b2c5d65c3..1a7af53a4da 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index 20510d9aec8..b20b6339484 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -76,12 +76,15 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { // uses unix time in nanoseconds to uniquely identify sessions. 
sessionName = strconv.FormatInt(now().UnixNano(), 10) } - resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{ + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), }) - if err != nil { + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 62a5be95346..165250f9e59 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -320,6 +320,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -339,6 +340,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -346,8 +348,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -1589,6 +1594,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, 
"ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1632,6 +1638,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -1752,9 +1759,33 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1963,11 +1994,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2055,6 +2089,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2125,6 +2160,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2653,6 +2689,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2660,8 +2697,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", @@ -3135,6 +3175,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3509,9 +3550,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3934,7 +3977,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -4615,6 +4659,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: 
"aws-us-gov-global", IsRegionalized: boxedFalse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index ede5341bc2e..c4e659b01d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -126,6 +126,9 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. func IsErrorRetryable(err error) bool { + if err == nil { + return false + } return shouldRetryError(err) } @@ -216,9 +219,6 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { - if r.Error == nil { - return false - } if isErrCode(r.Error, r.RetryErrorCodes) { return true } @@ -246,7 +246,7 @@ func (r *Request) IsErrorThrottle() bool { } func isErrCode(err error, codes []string) bool { - if aerr, ok := err.(awserr.Error); ok { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { for _, code := range codes { if code == aerr.Code() { return true diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 3a998d5bd62..60a6f9ce2a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -99,10 +99,10 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string - CSMClientID string CSMHost string + CSMClientID string // Enables endpoint discovery via environment variables. 
// @@ -230,7 +230,11 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } regionKeys := regionEnvKeys profileKeys := profileEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 1b4fcdb10e1..fa1362ff740 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -104,9 +104,13 @@ func New(cfgs ...*aws.Config) *Session { } s := deprecatedNewSession(cfgs...) - if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { err = fmt.Errorf("failed to enable CSM, %v", err) s.Config.Logger.Log("ERROR:", err.Error()) @@ -347,15 +351,12 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, - clientID, host, port string, - logger aws.Logger, -) error { +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { if logger != nil { logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port)) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { return err } @@ -395,7 +396,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, // Load additional config from file(s) sharedCfg, err := 
loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { return nil, err } } @@ -410,9 +417,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { return nil, err } @@ -428,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if 
sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 5170b4982e0..d91ac93a544 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -22,6 +22,12 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` @@ -76,6 +82,12 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string } type sharedConfigFile struct { @@ -251,10 +263,13 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } // Endpoint discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v - } + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) return nil } @@ -348,6 +363,16 @@ func updateString(dst *string, section ini.Section, key string) { *dst = section.String(key) } +// updateBoolPtr will only update the dst with the value in 
the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 75501aa5353..b4b4fd45e3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.23.0" +const SDKVersion = "1.23.8" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index de021367da2..74e361e070d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case 
"blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index cf569645dc2..07a6187ea62 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -39,7 +39,7 @@ func Build(r *request.Request) { r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + 0, r.RequestID, ) return diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go index 1b95e4a7ac2..ec67f20c50e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go @@ -1957,6 +1957,9 @@ func (c *AppMesh) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. 
@@ -1967,6 +1970,11 @@ func (c *AppMesh) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListTagsForResource func (c *AppMesh) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) @@ -2559,6 +2567,9 @@ func (c *AppMesh) TagResourceRequest(input *TagResourceInput) (req *request.Requ // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. @@ -2569,6 +2580,11 @@ func (c *AppMesh) TagResourceRequest(input *TagResourceInput) (req *request.Requ // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // * ErrCodeTooManyTagsException "TooManyTagsException" // The request exceeds the maximum allowed number of tags allowed per resource. // The current limit is 50 user tags per resource. 
You must reduce the number @@ -2654,6 +2670,9 @@ func (c *AppMesh) UntagResourceRequest(input *UntagResourceInput) (req *request. // * ErrCodeBadRequestException "BadRequestException" // The request syntax was malformed. Check your request syntax and try again. // +// * ErrCodeForbiddenException "ForbiddenException" +// You don't have permissions to perform this action. +// // * ErrCodeInternalServerErrorException "InternalServerErrorException" // The request processing has failed because of an unknown error, exception, // or failure. @@ -2664,6 +2683,11 @@ func (c *AppMesh) UntagResourceRequest(input *UntagResourceInput) (req *request. // * ErrCodeServiceUnavailableException "ServiceUnavailableException" // The request has failed due to a temporary failure of the service. // +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UntagResource func (c *AppMesh) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -4942,6 +4966,92 @@ func (s *FileAccessLog) SetPath(v string) *FileAccessLog { return s } +// An object representing the method and value to match the header value sent +// with a request. Specify one match method. +type HeaderMatchMethod struct { + _ struct{} `type:"structure"` + + Exact *string `locationName:"exact" min:"1" type:"string"` + + Prefix *string `locationName:"prefix" min:"1" type:"string"` + + // The range of values to match on. The first character of the range is included + // in the range, though the last character is not. For example, if the range + // specified were 1-100, only values 1-99 would be matched. 
+ Range *MatchRange `locationName:"range" type:"structure"` + + Regex *string `locationName:"regex" min:"1" type:"string"` + + Suffix *string `locationName:"suffix" min:"1" type:"string"` +} + +// String returns the string representation +func (s HeaderMatchMethod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeaderMatchMethod) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeaderMatchMethod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeaderMatchMethod"} + if s.Exact != nil && len(*s.Exact) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) + } + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + } + if s.Regex != nil && len(*s.Regex) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + } + if s.Suffix != nil && len(*s.Suffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + } + if s.Range != nil { + if err := s.Range.Validate(); err != nil { + invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExact sets the Exact field's value. +func (s *HeaderMatchMethod) SetExact(v string) *HeaderMatchMethod { + s.Exact = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *HeaderMatchMethod) SetPrefix(v string) *HeaderMatchMethod { + s.Prefix = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeaderMatchMethod) SetRange(v *MatchRange) *HeaderMatchMethod { + s.Range = v + return s +} + +// SetRegex sets the Regex field's value. +func (s *HeaderMatchMethod) SetRegex(v string) *HeaderMatchMethod { + s.Regex = &v + return s +} + +// SetSuffix sets the Suffix field's value. 
+func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod { + s.Suffix = &v + return s +} + // An object representing the health check policy for a virtual node's listener. type HealthCheckPolicy struct { _ struct{} `type:"structure"` @@ -5174,13 +5284,82 @@ func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteActi return s } +// An object representing the HTTP header in the request. +type HttpRouteHeader struct { + _ struct{} `type:"structure"` + + Invert *bool `locationName:"invert" type:"boolean"` + + // An object representing the method and value to match the header value sent + // with a request. Specify one match method. + Match *HeaderMatchMethod `locationName:"match" type:"structure"` + + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s HttpRouteHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpRouteHeader) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpRouteHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRouteHeader"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInvert sets the Invert field's value. +func (s *HttpRouteHeader) SetInvert(v bool) *HttpRouteHeader { + s.Invert = &v + return s +} + +// SetMatch sets the Match field's value. 
+func (s *HttpRouteHeader) SetMatch(v *HeaderMatchMethod) *HttpRouteHeader { + s.Match = v + return s +} + +// SetName sets the Name field's value. +func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader { + s.Name = &v + return s +} + // An object representing the requirements for a route to match HTTP requests // for a virtual router. type HttpRouteMatch struct { _ struct{} `type:"structure"` + Headers []*HttpRouteHeader `locationName:"headers" min:"1" type:"list"` + + Method *string `locationName:"method" type:"string" enum:"HttpMethod"` + // Prefix is a required field Prefix *string `locationName:"prefix" type:"string" required:"true"` + + Scheme *string `locationName:"scheme" type:"string" enum:"HttpScheme"` } // String returns the string representation @@ -5196,9 +5375,22 @@ func (s HttpRouteMatch) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *HttpRouteMatch) Validate() error { invalidParams := request.ErrInvalidParams{Context: "HttpRouteMatch"} + if s.Headers != nil && len(s.Headers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Headers", 1)) + } if s.Prefix == nil { invalidParams.Add(request.NewErrParamRequired("Prefix")) } + if s.Headers != nil { + for i, v := range s.Headers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5206,12 +5398,30 @@ func (s *HttpRouteMatch) Validate() error { return nil } +// SetHeaders sets the Headers field's value. +func (s *HttpRouteMatch) SetHeaders(v []*HttpRouteHeader) *HttpRouteMatch { + s.Headers = v + return s +} + +// SetMethod sets the Method field's value. +func (s *HttpRouteMatch) SetMethod(v string) *HttpRouteMatch { + s.Method = &v + return s +} + // SetPrefix sets the Prefix field's value. 
func (s *HttpRouteMatch) SetPrefix(v string) *HttpRouteMatch { s.Prefix = &v return s } +// SetScheme sets the Scheme field's value. +func (s *HttpRouteMatch) SetScheme(v string) *HttpRouteMatch { + s.Scheme = &v + return s +} + type ListMeshesInput struct { _ struct{} `type:"structure"` @@ -5840,6 +6050,57 @@ func (s *Logging) SetAccessLog(v *AccessLog) *Logging { return s } +// The range of values to match on. The first character of the range is included +// in the range, though the last character is not. For example, if the range +// specified were 1-100, only values 1-99 would be matched. +type MatchRange struct { + _ struct{} `type:"structure"` + + // End is a required field + End *int64 `locationName:"end" type:"long" required:"true"` + + // Start is a required field + Start *int64 `locationName:"start" type:"long" required:"true"` +} + +// String returns the string representation +func (s MatchRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MatchRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MatchRange"} + if s.End == nil { + invalidParams.Add(request.NewErrParamRequired("End")) + } + if s.Start == nil { + invalidParams.Add(request.NewErrParamRequired("Start")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnd sets the End field's value. +func (s *MatchRange) SetEnd(v int64) *MatchRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *MatchRange) SetStart(v int64) *MatchRange { + s.Start = &v + return s +} + // An object representing a service mesh returned by a describe operation. 
type MeshData struct { _ struct{} `type:"structure"` @@ -6237,6 +6498,8 @@ type RouteSpec struct { // An object representing the HTTP routing specification for a route. HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` + Priority *int64 `locationName:"priority" type:"integer"` + // An object representing the TCP routing specification for a route. TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` } @@ -6277,6 +6540,12 @@ func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { return s } +// SetPriority sets the Priority field's value. +func (s *RouteSpec) SetPriority(v int64) *RouteSpec { + s.Priority = &v + return s +} + // SetTcpRoute sets the TcpRoute field's value. func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { s.TcpRoute = v @@ -8015,6 +8284,43 @@ const ( EgressFilterTypeDropAll = "DROP_ALL" ) +const ( + // HttpMethodConnect is a HttpMethod enum value + HttpMethodConnect = "CONNECT" + + // HttpMethodDelete is a HttpMethod enum value + HttpMethodDelete = "DELETE" + + // HttpMethodGet is a HttpMethod enum value + HttpMethodGet = "GET" + + // HttpMethodHead is a HttpMethod enum value + HttpMethodHead = "HEAD" + + // HttpMethodOptions is a HttpMethod enum value + HttpMethodOptions = "OPTIONS" + + // HttpMethodPatch is a HttpMethod enum value + HttpMethodPatch = "PATCH" + + // HttpMethodPost is a HttpMethod enum value + HttpMethodPost = "POST" + + // HttpMethodPut is a HttpMethod enum value + HttpMethodPut = "PUT" + + // HttpMethodTrace is a HttpMethod enum value + HttpMethodTrace = "TRACE" +) + +const ( + // HttpSchemeHttp is a HttpScheme enum value + HttpSchemeHttp = "http" + + // HttpSchemeHttps is a HttpScheme enum value + HttpSchemeHttps = "https" +) + const ( // MeshStatusCodeActive is a MeshStatusCode enum value MeshStatusCodeActive = "ACTIVE" diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go index f1b608c572d..a74c53d10ed 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go @@ -2718,7 +2718,7 @@ type GetQueryResultsInput struct { _ struct{} `type:"structure"` // The maximum number of results (rows) to return in this request. - MaxResults *int64 `type:"integer"` + MaxResults *int64 `min:"1" type:"integer"` // The token that specifies where to start pagination if a previous request // was truncated. @@ -2743,6 +2743,9 @@ func (s GetQueryResultsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *GetQueryResultsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } @@ -3534,8 +3537,7 @@ func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutio // The location in Amazon S3 where query results are stored and the encryption // option, if any, used for query results. These are known as "client-side settings". // If workgroup settings override client-side settings, then the query uses -// the location for the query results and the encryption configuration that -// are specified for the workgroup. +// the workgroup settings. type ResultConfiguration struct { _ struct{} `type:"structure"` @@ -3549,12 +3551,13 @@ type ResultConfiguration struct { EncryptionConfiguration *EncryptionConfiguration `type:"structure"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. - // For more information, see Queries and Query Result Files. 
(https://docs.aws.amazon.com/athena/latest/ug/querying.html) + // To run the query, you must specify the query results location using one of + // the ways: either for individual queries using either this setting (client-side), + // or in the workgroup, using WorkGroupConfiguration. If none of them is set, + // Athena issues an error that no output location is provided. For more information, + // see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). // If workgroup settings override client-side settings, then the query uses - // the location for the query results and the encryption configuration that - // are specified for the workgroup. The "workgroup settings override" is specified - // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + // the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. OutputLocation *string `type:"string"` } @@ -3604,7 +3607,7 @@ type ResultConfigurationUpdates struct { EncryptionConfiguration *EncryptionConfiguration `type:"structure"` // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. - // For more information, see Queries and Query Result Files. (https://docs.aws.amazon.com/athena/latest/ug/querying.html) + // For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) // If workgroup settings override client-side settings, then the query uses // the location for the query results and the encryption configuration that // are specified for the workgroup. 
The "workgroup settings override" is specified @@ -4323,8 +4326,8 @@ type WorkGroup struct { // S3 where query results are stored, the encryption configuration, if any, // used for query results; whether the Amazon CloudWatch Metrics are enabled // for the workgroup; whether workgroup settings override client-side settings; - // and the data usage limit for the amount of data scanned per query, if it - // is specified. The workgroup settings override is specified in EnforceWorkGroupConfiguration + // and the data usage limits for the amount of data scanned per query or per + // workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. Configuration *WorkGroupConfiguration `type:"structure"` @@ -4387,7 +4390,7 @@ func (s *WorkGroup) SetState(v string) *WorkGroup { // S3 where query results are stored, the encryption option, if any, used for // query results, whether the Amazon CloudWatch Metrics are enabled for the // workgroup and whether workgroup settings override query settings, and the -// data usage limit for the amount of data scanned per query, if it is specified. +// data usage limits for the amount of data scanned per query or per workgroup. // The workgroup settings override is specified in EnforceWorkGroupConfiguration // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. type WorkGroupConfiguration struct { @@ -4405,9 +4408,22 @@ type WorkGroupConfiguration struct { // Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. PublishCloudWatchMetricsEnabled *bool `type:"boolean"` + // If set to true, allows members assigned to a workgroup to reference Amazon + // S3 Requester Pays buckets in queries. 
If set to false, workgroup members + // cannot query data from Requester Pays buckets, and queries that retrieve + // data from Requester Pays buckets cause an error. The default is false. For + // more information about Requester Pays buckets, see Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) + // in the Amazon Simple Storage Service Developer Guide. + RequesterPaysEnabled *bool `type:"boolean"` + // The configuration for the workgroup, which includes the location in Amazon // S3 where query results are stored and the encryption option, if any, used - // for query results. + // for query results. To run the query, you must specify the query results location + // using one of the ways: either in the workgroup using this setting, or for + // individual queries (client-side), using ResultConfiguration$OutputLocation. + // If none of them is set, Athena issues an error that no output location is + // provided. For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). ResultConfiguration *ResultConfiguration `type:"structure"` } @@ -4457,6 +4473,12 @@ func (s *WorkGroupConfiguration) SetPublishCloudWatchMetricsEnabled(v bool) *Wor return s } +// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. +func (s *WorkGroupConfiguration) SetRequesterPaysEnabled(v bool) *WorkGroupConfiguration { + s.RequesterPaysEnabled = &v + return s +} + // SetResultConfiguration sets the ResultConfiguration field's value. func (s *WorkGroupConfiguration) SetResultConfiguration(v *ResultConfiguration) *WorkGroupConfiguration { s.ResultConfiguration = v @@ -4487,6 +4509,15 @@ type WorkGroupConfigurationUpdates struct { // Indicates that the data usage control limit per query is removed. 
WorkGroupConfiguration$BytesScannedCutoffPerQuery RemoveBytesScannedCutoffPerQuery *bool `type:"boolean"` + // If set to true, allows members assigned to a workgroup to specify Amazon + // S3 Requester Pays buckets in queries. If set to false, workgroup members + // cannot query data from Requester Pays buckets, and queries that retrieve + // data from Requester Pays buckets cause an error. The default is false. For + // more information about Requester Pays buckets, see Requester Pays Buckets + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) + // in the Amazon Simple Storage Service Developer Guide. + RequesterPaysEnabled *bool `type:"boolean"` + // The result configuration information about the queries in this workgroup // that will be updated. Includes the updated results location and an updated // option for encrypting query results. @@ -4545,6 +4576,12 @@ func (s *WorkGroupConfigurationUpdates) SetRemoveBytesScannedCutoffPerQuery(v bo return s } +// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. +func (s *WorkGroupConfigurationUpdates) SetRequesterPaysEnabled(v bool) *WorkGroupConfigurationUpdates { + s.RequesterPaysEnabled = &v + return s +} + // SetResultConfigurationUpdates sets the ResultConfigurationUpdates field's value. 
func (s *WorkGroupConfigurationUpdates) SetResultConfigurationUpdates(v *ResultConfigurationUpdates) *WorkGroupConfigurationUpdates { s.ResultConfigurationUpdates = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index 5bd662a00df..4288448dbb5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -162,6 +162,117 @@ func (c *CodeCommit) BatchDescribeMergeConflictsWithContext(ctx aws.Context, inp return out, req.Send() } +const opBatchGetCommits = "BatchGetCommits" + +// BatchGetCommitsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetCommits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetCommits for more information on using the BatchGetCommits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetCommitsRequest method. 
+// req, resp := client.BatchGetCommitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchGetCommits +func (c *CodeCommit) BatchGetCommitsRequest(input *BatchGetCommitsInput) (req *request.Request, output *BatchGetCommitsOutput) { + op := &request.Operation{ + Name: opBatchGetCommits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetCommitsInput{} + } + + output = &BatchGetCommitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetCommits API operation for AWS CodeCommit. +// +// Returns information about the contents of one or more commits in a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation BatchGetCommits for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCommitIdsListRequiredException "CommitIdsListRequiredException" +// +// * ErrCodeCommitIdsLimitExceededException "CommitIdsLimitExceededException" +// The maximum number of allowed commit IDs in a batch request is 100. Verify +// that your batch requests contains no more than 100 commit IDs, and then try +// again. +// +// * ErrCodeRepositoryNameRequiredException "RepositoryNameRequiredException" +// A repository name is required but was not specified. +// +// * ErrCodeInvalidRepositoryNameException "InvalidRepositoryNameException" +// At least one specified repository name is not valid. +// +// This exception only occurs when a specified repository name is not valid. +// Other exceptions occur when a required repository parameter is missing, or +// when a specified repository does not exist. 
+// +// * ErrCodeRepositoryDoesNotExistException "RepositoryDoesNotExistException" +// The specified repository does not exist. +// +// * ErrCodeEncryptionIntegrityChecksFailedException "EncryptionIntegrityChecksFailedException" +// An encryption integrity check failed. +// +// * ErrCodeEncryptionKeyAccessDeniedException "EncryptionKeyAccessDeniedException" +// An encryption key could not be accessed. +// +// * ErrCodeEncryptionKeyDisabledException "EncryptionKeyDisabledException" +// The encryption key is disabled. +// +// * ErrCodeEncryptionKeyNotFoundException "EncryptionKeyNotFoundException" +// No encryption key was found. +// +// * ErrCodeEncryptionKeyUnavailableException "EncryptionKeyUnavailableException" +// The encryption key is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/BatchGetCommits +func (c *CodeCommit) BatchGetCommits(input *BatchGetCommitsInput) (*BatchGetCommitsOutput, error) { + req, out := c.BatchGetCommitsRequest(input) + return out, req.Send() +} + +// BatchGetCommitsWithContext is the same as BatchGetCommits with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetCommits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) BatchGetCommitsWithContext(ctx aws.Context, input *BatchGetCommitsInput, opts ...request.Option) (*BatchGetCommitsOutput, error) { + req, out := c.BatchGetCommitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opBatchGetRepositories = "BatchGetRepositories" // BatchGetRepositoriesRequest generates a "aws/request.Request" representing the @@ -7978,6 +8089,142 @@ func (s *BatchDescribeMergeConflictsOutput) SetSourceCommitId(v string) *BatchDe return s } +// Returns information about errors in a BatchGetCommits operation. +type BatchGetCommitsError struct { + _ struct{} `type:"structure"` + + // A commit ID that either could not be found or was not in a valid format. + CommitId *string `locationName:"commitId" type:"string"` + + // An error code that specifies whether the commit ID was not valid or not found. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // An error message that provides detail about why the commit ID either was + // not found or was not valid. + ErrorMessage *string `locationName:"errorMessage" type:"string"` +} + +// String returns the string representation +func (s BatchGetCommitsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsError) GoString() string { + return s.String() +} + +// SetCommitId sets the CommitId field's value. +func (s *BatchGetCommitsError) SetCommitId(v string) *BatchGetCommitsError { + s.CommitId = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *BatchGetCommitsError) SetErrorCode(v string) *BatchGetCommitsError { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *BatchGetCommitsError) SetErrorMessage(v string) *BatchGetCommitsError { + s.ErrorMessage = &v + return s +} + +type BatchGetCommitsInput struct { + _ struct{} `type:"structure"` + + // The full commit IDs of the commits to get information about. + // + // You must supply the full SHAs of each commit. You cannot use shortened SHAs. 
+ // + // CommitIds is a required field + CommitIds []*string `locationName:"commitIds" type:"list" required:"true"` + + // The name of the repository that contains the commits. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchGetCommitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetCommitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetCommitsInput"} + if s.CommitIds == nil { + invalidParams.Add(request.NewErrParamRequired("CommitIds")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommitIds sets the CommitIds field's value. +func (s *BatchGetCommitsInput) SetCommitIds(v []*string) *BatchGetCommitsInput { + s.CommitIds = v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *BatchGetCommitsInput) SetRepositoryName(v string) *BatchGetCommitsInput { + s.RepositoryName = &v + return s +} + +type BatchGetCommitsOutput struct { + _ struct{} `type:"structure"` + + // An array of commit data type objects, each of which contains information + // about a specified commit. + Commits []*Commit `locationName:"commits" type:"list"` + + // Returns any commit IDs for which information could not be found. 
For example, + // if one of the commit IDs was a shortened SHA or that commit was not found + // in the specified repository, the ID will return an error object with additional + // information. + Errors []*BatchGetCommitsError `locationName:"errors" type:"list"` +} + +// String returns the string representation +func (s BatchGetCommitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetCommitsOutput) GoString() string { + return s.String() +} + +// SetCommits sets the Commits field's value. +func (s *BatchGetCommitsOutput) SetCommits(v []*Commit) *BatchGetCommitsOutput { + s.Commits = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *BatchGetCommitsOutput) SetErrors(v []*BatchGetCommitsError) *BatchGetCommitsOutput { + s.Errors = v + return s +} + // Represents the input of a batch get repositories operation. type BatchGetRepositoriesInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go index 9a9e7a37e01..9ee44edfbe4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go @@ -60,6 +60,9 @@ // // Commits, by calling the following: // +// * BatchGetCommits, which returns information about one or more commits +// in a repository +// // * CreateCommit, which creates a commit for changes to a repository. // // * GetCommit, which returns information about a commit, including commit diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go index 2655d6b6a81..210448016d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go @@ -130,6 +130,18 @@ const ( // A commit ID was not specified. 
ErrCodeCommitIdRequiredException = "CommitIdRequiredException" + // ErrCodeCommitIdsLimitExceededException for service response error code + // "CommitIdsLimitExceededException". + // + // The maximum number of allowed commit IDs in a batch request is 100. Verify + // that your batch requests contains no more than 100 commit IDs, and then try + // again. + ErrCodeCommitIdsLimitExceededException = "CommitIdsLimitExceededException" + + // ErrCodeCommitIdsListRequiredException for service response error code + // "CommitIdsListRequiredException". + ErrCodeCommitIdsListRequiredException = "CommitIdsListRequiredException" + // ErrCodeCommitMessageLengthExceededException for service response error code // "CommitMessageLengthExceededException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go index 88ee4481059..46efb54c1a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go @@ -229,6 +229,90 @@ func (c *CostandUsageReportService) DescribeReportDefinitionsPagesWithContext(ct return p.Err() } +const opModifyReportDefinition = "ModifyReportDefinition" + +// ModifyReportDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReportDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyReportDefinition for more information on using the ModifyReportDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyReportDefinitionRequest method. +// req, resp := client.ModifyReportDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cur-2017-01-06/ModifyReportDefinition +func (c *CostandUsageReportService) ModifyReportDefinitionRequest(input *ModifyReportDefinitionInput) (req *request.Request, output *ModifyReportDefinitionOutput) { + op := &request.Operation{ + Name: opModifyReportDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReportDefinitionInput{} + } + + output = &ModifyReportDefinitionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ModifyReportDefinition API operation for AWS Cost and Usage Report Service. +// +// Allows you to programatically update your report preferences. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Cost and Usage Report Service's +// API operation ModifyReportDefinition for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalErrorException "InternalErrorException" +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * ErrCodeValidationException "ValidationException" +// The input fails to satisfy the constraints specified by an AWS service. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cur-2017-01-06/ModifyReportDefinition +func (c *CostandUsageReportService) ModifyReportDefinition(input *ModifyReportDefinitionInput) (*ModifyReportDefinitionOutput, error) { + req, out := c.ModifyReportDefinitionRequest(input) + return out, req.Send() +} + +// ModifyReportDefinitionWithContext is the same as ModifyReportDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReportDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CostandUsageReportService) ModifyReportDefinitionWithContext(ctx aws.Context, input *ModifyReportDefinitionInput, opts ...request.Option) (*ModifyReportDefinitionOutput, error) { + req, out := c.ModifyReportDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutReportDefinition = "PutReportDefinition" // PutReportDefinitionRequest generates a "aws/request.Request" representing the @@ -449,6 +533,80 @@ func (s *DescribeReportDefinitionsOutput) SetReportDefinitions(v []*ReportDefini return s } +type ModifyReportDefinitionInput struct { + _ struct{} `type:"structure"` + + // The definition of AWS Cost and Usage Report. You can specify the report name, + // time unit, report format, compression format, S3 bucket, additional artifacts, + // and schema elements in the definition. + // + // ReportDefinition is a required field + ReportDefinition *ReportDefinition `type:"structure" required:"true"` + + // The name of the report that you want to create. The name must be unique, + // is case sensitive, and can't include spaces. 
+ // + // ReportName is a required field + ReportName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyReportDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReportDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReportDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReportDefinitionInput"} + if s.ReportDefinition == nil { + invalidParams.Add(request.NewErrParamRequired("ReportDefinition")) + } + if s.ReportName == nil { + invalidParams.Add(request.NewErrParamRequired("ReportName")) + } + if s.ReportDefinition != nil { + if err := s.ReportDefinition.Validate(); err != nil { + invalidParams.AddNested("ReportDefinition", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReportDefinition sets the ReportDefinition field's value. +func (s *ModifyReportDefinitionInput) SetReportDefinition(v *ReportDefinition) *ModifyReportDefinitionInput { + s.ReportDefinition = v + return s +} + +// SetReportName sets the ReportName field's value. +func (s *ModifyReportDefinitionInput) SetReportName(v string) *ModifyReportDefinitionInput { + s.ReportName = &v + return s +} + +type ModifyReportDefinitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyReportDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReportDefinitionOutput) GoString() string { + return s.String() +} + // Creates a Cost and Usage Report. 
type PutReportDefinitionInput struct { _ struct{} `type:"structure"` @@ -714,6 +872,9 @@ const ( // AWSRegionApNortheast3 is a AWSRegion enum value AWSRegionApNortheast3 = "ap-northeast-3" + + // AWSRegionApEast1 is a AWSRegion enum value + AWSRegionApEast1 = "ap-east-1" ) // The types of manifest that you want AWS to create for this report. diff --git a/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go b/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go index 765189dc94a..a7b421d05f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go @@ -459,6 +459,88 @@ func (c *DataSync) CreateLocationS3WithContext(ctx aws.Context, input *CreateLoc return out, req.Send() } +const opCreateLocationSmb = "CreateLocationSmb" + +// CreateLocationSmbRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationSmb operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationSmb for more information on using the CreateLocationSmb +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationSmbRequest method. 
+// req, resp := client.CreateLocationSmbRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationSmb +func (c *DataSync) CreateLocationSmbRequest(input *CreateLocationSmbInput) (req *request.Request, output *CreateLocationSmbOutput) { + op := &request.Operation{ + Name: opCreateLocationSmb, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationSmbInput{} + } + + output = &CreateLocationSmbOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationSmb API operation for AWS DataSync. +// +// Creates an endpoint for a Server Message Block (SMB) file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationSmb for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception is thrown when the client submits a malformed request. +// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationSmb +func (c *DataSync) CreateLocationSmb(input *CreateLocationSmbInput) (*CreateLocationSmbOutput, error) { + req, out := c.CreateLocationSmbRequest(input) + return out, req.Send() +} + +// CreateLocationSmbWithContext is the same as CreateLocationSmb with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationSmb for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) CreateLocationSmbWithContext(ctx aws.Context, input *CreateLocationSmbInput, opts ...request.Option) (*CreateLocationSmbOutput, error) { + req, out := c.CreateLocationSmbRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTask = "CreateTask" // CreateTaskRequest generates a "aws/request.Request" representing the @@ -1140,6 +1222,88 @@ func (c *DataSync) DescribeLocationS3WithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opDescribeLocationSmb = "DescribeLocationSmb" + +// DescribeLocationSmbRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationSmb operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationSmb for more information on using the DescribeLocationSmb +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationSmbRequest method. 
+// req, resp := client.DescribeLocationSmbRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationSmb +func (c *DataSync) DescribeLocationSmbRequest(input *DescribeLocationSmbInput) (req *request.Request, output *DescribeLocationSmbOutput) { + op := &request.Operation{ + Name: opDescribeLocationSmb, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationSmbInput{} + } + + output = &DescribeLocationSmbOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationSmb API operation for AWS DataSync. +// +// Returns metadata, such as the path and user information about a SMB location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationSmb for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception is thrown when the client submits a malformed request. +// +// * ErrCodeInternalException "InternalException" +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationSmb +func (c *DataSync) DescribeLocationSmb(input *DescribeLocationSmbInput) (*DescribeLocationSmbOutput, error) { + req, out := c.DescribeLocationSmbRequest(input) + return out, req.Send() +} + +// DescribeLocationSmbWithContext is the same as DescribeLocationSmb with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationSmb for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationSmbWithContext(ctx aws.Context, input *DescribeLocationSmbInput, opts ...request.Option) (*DescribeLocationSmbOutput, error) { + req, out := c.DescribeLocationSmbRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeTask = "DescribeTask" // DescribeTaskRequest generates a "aws/request.Request" representing the @@ -3086,6 +3250,188 @@ func (s *CreateLocationS3Output) SetLocationArn(v string) *CreateLocationS3Outpu return s } +// CreateLocationSmbRequest +type CreateLocationSmbInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Names (ARNs) of agents to use for a Simple Message Block + // (SMB) location. + // + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` + + // The name of the domain that the SMB server belongs to. + Domain *string `type:"string"` + + // The mount options that are available for DataSync to use to access an SMB + // location. + MountOptions *SmbMountOptions `type:"structure"` + + // The password of the user who has permission to access the SMB server. + // + // Password is a required field + Password *string `type:"string" required:"true"` + + // The name of the SMB server. This value is the IP address or Domain Name Service + // (DNS) name of the SMB server. An agent that is installed on-premises uses + // this host name to mount the SMB server in a network. + // + // This name must either be DNS-compliant or must be an IP version 4 (IPv4) + // address. 
+ // + // ServerHostname is a required field + ServerHostname *string `type:"string" required:"true"` + + // The subdirectory in the SMB file system that is used to read data from the + // SMB source location or write data to the SMB destination. The SMB path should + // be a path that's exported by the SMB server, or a subdirectory of that path. + // The path should be such that it can be mounted by other SMB clients in your + // network. + // + // To transfer all the data in the folder you specified, DataSync needs to have + // permissions to mount the SMB share, as well as to access all the data in + // that share. To ensure this, either ensure that the user/password specified + // belongs to the user who can mount the share, and who has the appropriate + // permissions for all of the files and directories that you want DataSync to + // access, or use credentials of a member of the Backup Operators group to mount + // the share. Doing either enables the agent to access the data. For the agent + // to access directories, you must additionally enable all execute access. + // + // Subdirectory is a required field + Subdirectory *string `type:"string" required:"true"` + + // The key-value pair that represents the tag that you want to add to the location. + // The value can be an empty string. We recommend using tags to name your resources. + Tags []*TagListEntry `type:"list"` + + // The user who can mount the share, has the permissions to access files and + // directories in the SMB share. + // + // User is a required field + User *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLocationSmbInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationSmbInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateLocationSmbInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationSmbInput"} + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.ServerHostname == nil { + invalidParams.Add(request.NewErrParamRequired("ServerHostname")) + } + if s.Subdirectory == nil { + invalidParams.Add(request.NewErrParamRequired("Subdirectory")) + } + if s.User == nil { + invalidParams.Add(request.NewErrParamRequired("User")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationSmbInput) SetAgentArns(v []*string) *CreateLocationSmbInput { + s.AgentArns = v + return s +} + +// SetDomain sets the Domain field's value. +func (s *CreateLocationSmbInput) SetDomain(v string) *CreateLocationSmbInput { + s.Domain = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *CreateLocationSmbInput) SetMountOptions(v *SmbMountOptions) *CreateLocationSmbInput { + s.MountOptions = v + return s +} + +// SetPassword sets the Password field's value. +func (s *CreateLocationSmbInput) SetPassword(v string) *CreateLocationSmbInput { + s.Password = &v + return s +} + +// SetServerHostname sets the ServerHostname field's value. +func (s *CreateLocationSmbInput) SetServerHostname(v string) *CreateLocationSmbInput { + s.ServerHostname = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. 
+func (s *CreateLocationSmbInput) SetSubdirectory(v string) *CreateLocationSmbInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationSmbInput) SetTags(v []*TagListEntry) *CreateLocationSmbInput { + s.Tags = v + return s +} + +// SetUser sets the User field's value. +func (s *CreateLocationSmbInput) SetUser(v string) *CreateLocationSmbInput { + s.User = &v + return s +} + +// CreateLocationSmbResponse +type CreateLocationSmbOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the source SMB file system location that + // is created. + LocationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLocationSmbOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationSmbOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationSmbOutput) SetLocationArn(v string) *CreateLocationSmbOutput { + s.LocationArn = &v + return s +} + // CreateTaskRequest type CreateTaskInput struct { _ struct{} `type:"structure"` @@ -3464,7 +3810,8 @@ type DescribeAgentOutput struct { // The name of the agent. Name *string `min:"1" type:"string"` - // The subnet and the security group that DataSync used to access a VPC endpoint. + // The VPC endpoint, subnet and security group that an agent uses to access + // IP addresses in a VPC (Virtual Private Cloud). PrivateLinkConfig *PrivateLinkConfig `type:"structure"` // The status of the agent. If the status is ONLINE, then the agent is configured @@ -3814,6 +4161,125 @@ func (s *DescribeLocationS3Output) SetS3Config(v *S3Config) *DescribeLocationS3O return s } +// DescribeLocationSmbRequest +type DescribeLocationSmbInput struct { + _ struct{} `type:"structure"` + + // The Amazon resource Name (ARN) of the SMB location to describe. 
+ // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLocationSmbInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationSmbInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationSmbInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationSmbInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationSmbInput) SetLocationArn(v string) *DescribeLocationSmbInput { + s.LocationArn = &v + return s +} + +// DescribeLocationSmbResponse +type DescribeLocationSmbOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the source SMB file system location that + // is created. + AgentArns []*string `min:"1" type:"list"` + + // The time that the SMB location was created. + CreationTime *time.Time `type:"timestamp"` + + // The name of the domain that the SMB server belongs to. + Domain *string `type:"string"` + + // The Amazon resource Name (ARN) of the SMB location that was described. + LocationArn *string `type:"string"` + + // The URL of the source SBM location that was described. + LocationUri *string `type:"string"` + + // The mount options that are available for DataSync to use to access an SMB + // location. + MountOptions *SmbMountOptions `type:"structure"` + + // The user who is logged on the SMB server. 
+ User *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLocationSmbOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationSmbOutput) GoString() string { + return s.String() +} + +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationSmbOutput) SetAgentArns(v []*string) *DescribeLocationSmbOutput { + s.AgentArns = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationSmbOutput) SetCreationTime(v time.Time) *DescribeLocationSmbOutput { + s.CreationTime = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *DescribeLocationSmbOutput) SetDomain(v string) *DescribeLocationSmbOutput { + s.Domain = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationSmbOutput) SetLocationArn(v string) *DescribeLocationSmbOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationSmbOutput) SetLocationUri(v string) *DescribeLocationSmbOutput { + s.LocationUri = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *DescribeLocationSmbOutput) SetMountOptions(v *SmbMountOptions) *DescribeLocationSmbOutput { + s.MountOptions = v + return s +} + +// SetUser sets the User field's value. +func (s *DescribeLocationSmbOutput) SetUser(v string) *DescribeLocationSmbOutput { + s.User = &v + return s +} + // DescribeTaskExecutionRequest type DescribeTaskExecutionInput struct { _ struct{} `type:"structure"` @@ -5100,6 +5566,34 @@ func (s *S3Config) SetBucketAccessRoleArn(v string) *S3Config { return s } +// Represents the mount options that are available for DataSync to access an +// SMB location. +type SmbMountOptions struct { + _ struct{} `type:"structure"` + + // The specific SMB version that you want DataSync to use to mount your SMB + // share. 
If you don't specify a version, DataSync defaults to AUTOMATIC. That + // is, DataSync automatically selects a version based on negotiation with the + // SMB Server server. + Version *string `type:"string" enum:"SmbVersion"` +} + +// String returns the string representation +func (s SmbMountOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SmbMountOptions) GoString() string { + return s.String() +} + +// SetVersion sets the Version field's value. +func (s *SmbMountOptions) SetVersion(v string) *SmbMountOptions { + s.Version = &v + return s +} + // StartTaskExecutionRequest type StartTaskExecutionInput struct { _ struct{} `type:"structure"` @@ -5848,6 +6342,17 @@ const ( PreserveDevicesPreserve = "PRESERVE" ) +const ( + // SmbVersionAutomatic is a SmbVersion enum value + SmbVersionAutomatic = "AUTOMATIC" + + // SmbVersionSmb2 is a SmbVersion enum value + SmbVersionSmb2 = "SMB2" + + // SmbVersionSmb3 is a SmbVersion enum value + SmbVersionSmb3 = "SMB3" +) + const ( // TaskExecutionStatusLaunching is a TaskExecutionStatus enum value TaskExecutionStatusLaunching = "LAUNCHING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 0385cb15b4f..4b4802db0ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -12685,6 +12685,80 @@ func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeEla return out, req.Send() } +const opDescribeExportImageTasks = "DescribeExportImageTasks" + +// DescribeExportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportImageTasks for more information on using the DescribeExportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportImageTasksRequest method. +// req, resp := client.DescribeExportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInput) (req *request.Request, output *DescribeExportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportImageTasksInput{} + } + + output = &DescribeExportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export image tasks or all your export image tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportImageTasks for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasks(input *DescribeExportImageTasksInput) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeExportImageTasksWithContext is the same as DescribeExportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeExportImageTasksWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, opts ...request.Option) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the @@ -12729,7 +12803,8 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export tasks or all your export tasks. +// Describes the specified export instance tasks or all your export instance +// tasks. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -24001,6 +24076,82 @@ func (c *EC2) ExportClientVpnClientConfigurationWithContext(ctx aws.Context, inp return out, req.Send() } +const opExportImage = "ExportImage" + +// ExportImageRequest generates a "aws/request.Request" representing the +// client's request for the ExportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportImage for more information on using the ExportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportImageRequest method. +// req, resp := client.ExportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, output *ExportImageOutput) { + op := &request.Operation{ + Name: opExportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportImageInput{} + } + + output = &ExportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportImage API operation for Amazon Elastic Compute Cloud. +// +// Exports an Amazon Machine Image (AMI) to a VM file. For more information, +// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// in the VM Import/Export User Guide. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImage(input *ExportImageInput) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + return out, req.Send() +} + +// ExportImageWithContext is the same as ExportImage with the addition of +// the ability to pass a context and additional request options. +// +// See ExportImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportImageWithContext(ctx aws.Context, input *ExportImageInput, opts ...request.Option) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opExportTransitGatewayRoutes = "ExportTransitGatewayRoutes" // ExportTransitGatewayRoutesRequest generates a "aws/request.Request" representing the @@ -24121,6 +24272,12 @@ func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUs // GetCapacityReservationUsage API operation for Amazon Elastic Compute Cloud. // +// Gets usage information about a Capacity Reservation. If the Capacity Reservation +// is shared, it shows usage information for the Capacity Reservation owner +// and each AWS account that is currently using the shared capacity. If the +// Capacity Reservation is not shared, it shows only the Capacity Reservation +// owner's usage. 
+// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -25943,8 +26100,35 @@ func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, // // Modifies the specified EC2 Fleet. // +// You can only modify an EC2 Fleet request of type maintain. +// // While the EC2 Fleet is being modified, it is in the modifying state. // +// To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches +// the additional Spot Instances according to the allocation strategy for the +// EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet +// launches instances using the Spot Instance pool with the lowest price. If +// the allocation strategy is diversified, the EC2 Fleet distributes the instances +// across the Spot Instance pools. If the allocation strategy is capacityOptimized, +// EC2 Fleet launches instances from Spot Instance pools with optimal capacity +// for the number of instances that are launching. +// +// To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 +// Fleet cancels any open requests that exceed the new target capacity. You +// can request that the EC2 Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the EC2 Fleet terminates the instances with the highest price +// per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet +// terminates the instances in the Spot Instance pools that have the least available +// Spot Instance capacity. If the allocation strategy is diversified, the EC2 +// Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the EC2 Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. 
+// +// If you are finished with your EC2 Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -27097,6 +27281,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // or remove specified AWS account IDs from a snapshot's list of create volume // permissions, but you cannot do both in a single operation. If you need to // both add and remove account IDs for a snapshot, you must use multiple operations. +// You can make up to 500 modifications to a snapshot in a single operation. // // Encrypted snapshots and snapshots with AWS Marketplace product codes cannot // be made public. Snapshots encrypted with your default CMK cannot be shared @@ -27187,19 +27372,24 @@ func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) // To scale up your Spot Fleet, increase its target capacity. The Spot Fleet // launches the additional Spot Instances according to the allocation strategy // for the Spot Fleet request. If the allocation strategy is lowestPrice, the -// Spot Fleet launches instances using the Spot pool with the lowest price. -// If the allocation strategy is diversified, the Spot Fleet distributes the -// instances across the Spot pools. +// Spot Fleet launches instances using the Spot Instance pool with the lowest +// price. If the allocation strategy is diversified, the Spot Fleet distributes +// the instances across the Spot Instance pools. If the allocation strategy +// is capacityOptimized, Spot Fleet launches instances from Spot Instance pools +// with optimal capacity for the number of instances that are launching. // // To scale down your Spot Fleet, decrease its target capacity. First, the Spot // Fleet cancels any open requests that exceed the new target capacity. 
You // can request that the Spot Fleet terminate Spot Instances until the size of // the fleet no longer exceeds the new target capacity. If the allocation strategy // is lowestPrice, the Spot Fleet terminates the instances with the highest -// price per unit. If the allocation strategy is diversified, the Spot Fleet -// terminates instances across the Spot pools. Alternatively, you can request -// that the Spot Fleet keep the fleet at its current size, but not replace any -// Spot Instances that are interrupted or that you terminate manually. +// price per unit. If the allocation strategy is capacityOptimized, the Spot +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the Spot Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. // // If you are finished with your Spot Fleet for now, but will use it again later, // you can set the target capacity to 0. @@ -28484,6 +28674,80 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" + +// ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelCertificate for more information on using the ModifyVpnTunnelCertificate +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelCertificateRequest method. +// req, resp := client.ModifyVpnTunnelCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificateRequest(input *ModifyVpnTunnelCertificateInput) (req *request.Request, output *ModifyVpnTunnelCertificateOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelCertificateInput{} + } + + output = &ModifyVpnTunnelCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelCertificate API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPN tunnel endpoint certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelCertificate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificate(input *ModifyVpnTunnelCertificateInput) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelCertificateWithContext is the same as ModifyVpnTunnelCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelCertificate for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelCertificateWithContext(ctx aws.Context, input *ModifyVpnTunnelCertificateInput, opts ...request.Option) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -30179,10 +30443,10 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // You can submit a single request that includes multiple launch specifications // that vary by instance type, AMI, Availability Zone, or subnet. // -// By default, the Spot Fleet requests Spot Instances in the Spot pool where -// the price per unit is the lowest. Each launch specification can include its -// own instance weighting that reflects the value of the instance type to your -// application workload. +// By default, the Spot Fleet requests Spot Instances in the Spot Instance pool +// where the price per unit is the lowest. Each launch specification can include +// its own instance weighting that reflects the value of the instance type to +// your application workload. // // Alternatively, you can specify that the Spot Fleet distribute the target // capacity across the Spot pools included in its launch specifications. 
By @@ -31384,6 +31648,98 @@ func (c *EC2) SearchTransitGatewayRoutesWithContext(ctx aws.Context, input *Sear return out, req.Send() } +const opSendDiagnosticInterrupt = "SendDiagnosticInterrupt" + +// SendDiagnosticInterruptRequest generates a "aws/request.Request" representing the +// client's request for the SendDiagnosticInterrupt operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendDiagnosticInterrupt for more information on using the SendDiagnosticInterrupt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendDiagnosticInterruptRequest method. +// req, resp := client.SendDiagnosticInterruptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput) (req *request.Request, output *SendDiagnosticInterruptOutput) { + op := &request.Operation{ + Name: opSendDiagnosticInterrupt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendDiagnosticInterruptInput{} + } + + output = &SendDiagnosticInterruptOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendDiagnosticInterrupt API operation for Amazon Elastic Compute Cloud. 
+// +// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger +// a kernel panic (on Linux instances), or a blue screen/stop error (on Windows +// instances). For instances based on Intel and AMD processors, the interrupt +// is received as a non-maskable interrupt (NMI). +// +// In general, the operating system crashes and reboots when a kernel panic +// or stop error is triggered. The operating system can also be configured to +// perform diagnostic tasks, such as generating a memory dump file, loading +// a secondary kernel, or obtaining a call trace. +// +// Before sending a diagnostic interrupt to your instance, ensure that its operating +// system is configured to perform the required diagnostic tasks. +// +// For more information about configuring your operating system to generate +// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic +// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) +// (Windows instances). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SendDiagnosticInterrupt for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterrupt(input *SendDiagnosticInterruptInput) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + return out, req.Send() +} + +// SendDiagnosticInterruptWithContext is the same as SendDiagnosticInterrupt with the addition of +// the ability to pass a context and additional request options. 
+// +// See SendDiagnosticInterrupt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SendDiagnosticInterruptWithContext(ctx aws.Context, input *SendDiagnosticInterruptInput, opts ...request.Option) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the @@ -36457,12 +36813,14 @@ type CapacityReservation struct { // The Availability Zone in which the capacity is reserved. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The Availability Zone ID of the Capacity Reservation. AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` // The remaining capacity. Indicates the number of instances that can be launched // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The Amazon Resource Name (ARN) of the Capacity Reservation. CapacityReservationArn *string `locationName:"capacityReservationArn" type:"string"` // The ID of the Capacity Reservation. @@ -36519,6 +36877,7 @@ type CapacityReservation struct { // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // The ID of the AWS account that owns the Capacity Reservation. OwnerId *string `locationName:"ownerId" type:"string"` // The current state of the Capacity Reservation. 
A Capacity Reservation can @@ -36527,11 +36886,11 @@ type CapacityReservation struct { // * active - The Capacity Reservation is active and the capacity is available // for your use. // - // * cancelled - The Capacity Reservation expired automatically at the date + // * expired - The Capacity Reservation expired automatically at the date // and time specified in your request. The reserved capacity is no longer // available for your use. // - // * expired - The Capacity Reservation was manually cancelled. The reserved + // * cancelled - The Capacity Reservation was manually cancelled. The reserved // capacity is no longer available for your use. // // * pending - The Capacity Reservation request was successful but the capacity @@ -36555,7 +36914,8 @@ type CapacityReservation struct { // that is dedicated to a single AWS account. Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` - // The number of instances for which the Capacity Reservation reserves capacity. + // The total number of instances for which the Capacity Reservation reserves + // capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -37515,8 +37875,7 @@ type ClientVpnEndpoint struct { // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` - // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint. // // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) @@ -38649,9 +39008,8 @@ type CpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. 
Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -38683,6 +39041,7 @@ type CreateCapacityReservationInput struct { // The Availability Zone in which to create the Capacity Reservation. AvailabilityZone *string `type:"string"` + // The ID of the Availability Zone in which to create the Capacity Reservation. AvailabilityZoneId *string `type:"string"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -38958,8 +39317,7 @@ type CreateClientVpnEndpointInput struct { // Information about the DNS servers to be used for DNS resolution. A Client // VPN endpoint can have up to two DNS servers. If no DNS server is specified, - // the DNS address of the VPC that is to be associated with Client VPN endpoint - // is used as the DNS server. + // the DNS address configured on the device is used for the DNS server. DnsServers []*string `locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -38974,8 +39332,7 @@ type CreateClientVpnEndpointInput struct { // ServerCertificateArn is a required field ServerCertificateArn *string `type:"string" required:"true"` - // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. // // By default, split-tunnel on a VPN endpoint is disabled. // @@ -39274,6 +39631,9 @@ type CreateCustomerGatewayInput struct { // BgpAsn is a required field BgpAsn *int64 `type:"integer" required:"true"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. 
If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -39282,9 +39642,7 @@ type CreateCustomerGatewayInput struct { // The Internet-routable IP address for the customer gateway's outside interface. // The address must be static. - // - // PublicIp is a required field - PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + PublicIp *string `locationName:"IpAddress" type:"string"` // The type of VPN connection that this customer gateway supports (ipsec.1). // @@ -39308,9 +39666,6 @@ func (s *CreateCustomerGatewayInput) Validate() error { if s.BgpAsn == nil { invalidParams.Add(request.NewErrParamRequired("BgpAsn")) } - if s.PublicIp == nil { - invalidParams.Add(request.NewErrParamRequired("PublicIp")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -39327,6 +39682,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { + s.CertificateArn = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput { s.DryRun = &v @@ -40835,7 +41196,9 @@ type CreateLaunchTemplateVersionInput struct { // The version number of the launch template version on which to base the new // version. The new version inherits the same launch parameters as the source - // version, except for parameters that you specify in LaunchTemplateData. + // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // applied to the block device mapping are ignored when creating a new version + // unless they are explicitly included. SourceVersion *string `type:"string"` // A description for the version of the launch template. 
@@ -43607,10 +43970,7 @@ type CreateVolumeInput struct { // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. // - // Defaults: If no volume type is specified, the default is standard in us-east-1, - // eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, - // ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, - // and cn-north-1. In all other Regions, EBS defaults to gp2. + // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` } @@ -44718,6 +45078,9 @@ type CustomerGateway struct { // (ASN). BgpAsn *string `locationName:"bgpAsn" type:"string"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The ID of the customer gateway. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` @@ -44751,6 +45114,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { + s.CertificateArn = &v + return s +} + // SetCustomerGatewayId sets the CustomerGatewayId field's value. func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway { s.CustomerGatewayId = &v @@ -49997,6 +50366,115 @@ func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusO return s } +type DescribeExportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the export image tasks. 
+ ExportImageTaskIds []*string `locationName:"ExportImageTaskId" locationNameList:"ExportImageTaskId" type:"list"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeExportImageTasksInput) SetDryRun(v bool) *DescribeExportImageTasksInput { + s.DryRun = &v + return s +} + +// SetExportImageTaskIds sets the ExportImageTaskIds field's value. +func (s *DescribeExportImageTasksInput) SetExportImageTaskIds(v []*string) *DescribeExportImageTasksInput { + s.ExportImageTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportImageTasksInput) SetFilters(v []*Filter) *DescribeExportImageTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribeExportImageTasksInput) SetMaxResults(v int64) *DescribeExportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksInput) SetNextToken(v string) *DescribeExportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export image tasks. + ExportImageTasks []*ExportImageTask `locationName:"exportImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksOutput) GoString() string { + return s.String() +} + +// SetExportImageTasks sets the ExportImageTasks field's value. +func (s *DescribeExportImageTasksOutput) SetExportImageTasks(v []*ExportImageTask) *DescribeExportImageTasksOutput { + s.ExportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksOutput) SetNextToken(v string) *DescribeExportImageTasksOutput { + s.NextToken = &v + return s +} + type DescribeExportTasksInput struct { _ struct{} `type:"structure"` @@ -51807,14 +52285,13 @@ type DescribeImportImageTasksInput struct { DryRun *bool `type:"boolean"` // Filter tasks using the task-state filter and one of the following values: - // active, completed, deleting, deleted. + // active, completed, deleting, or deleted. Filters []*Filter `locationNameList:"Filter" type:"list"` - // A list of import image task IDs. + // The IDs of the import image tasks. 
ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` - // The maximum number of results to return in a single call. To retrieve the - // remaining results, make another call with the returned NextToken value. + // The maximum number of results to return in a single call. MaxResults *int64 `type:"integer"` // A token that indicates the next page of results. @@ -54549,6 +55026,9 @@ type DescribeRegionsInput struct { // // * endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com). // + // * opt-in-status - The opt-in status of the Region (opt-in-not-required + // | opted-in | not-opted-in). + // // * region-name - The name of the Region (for example, us-east-1). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -60360,6 +60840,19 @@ type DetachNetworkInterfaceInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // Specifies whether to force a detachment. + // + // * Use the Force parameter only as a last resort to detach a network interface + // from a failed instance. + // + // * If you use the Force parameter to detach a network interface, you might + // not be able to attach a different network interface to the same index + // on the instance without first stopping and starting the instance. + // + // * If you force the detachment of a network interface, the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // might not get updated. This means that the attributes associated with + // the detached network interface might still be visible. The instance metadata + // will get updated when you stop and start the instance. Force *bool `locationName:"force" type:"boolean"` } @@ -61909,9 +62402,10 @@ type EbsBlockDevice struct { // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also set the Iops property. 
+ // The volume type. If you set the type to io1, you must also specify the IOPS + // that the volume supports. // - // Default: standard + // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -63035,6 +63529,295 @@ func (s *ExportClientVpnClientConfigurationOutput) SetClientConfiguration(v stri return s } +type ExportImageInput struct { + _ struct{} `type:"structure"` + + // Token to enable idempotency for export image requests. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A description of the image being exported. The maximum length is 255 bytes. + Description *string `type:"string"` + + // The disk image format. + // + // DiskImageFormat is a required field + DiskImageFormat *string `type:"string" required:"true" enum:"DiskImageFormat"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. If this parameter is not specified, the default role is + // named 'vmimport'. + RoleName *string `type:"string"` + + // Information about the destination S3 bucket. The bucket must exist and grant + // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. 
+ // + // S3ExportLocation is a required field + S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportImageInput"} + if s.DiskImageFormat == nil { + invalidParams.Add(request.NewErrParamRequired("DiskImageFormat")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.S3ExportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportLocation")) + } + if s.S3ExportLocation != nil { + if err := s.S3ExportLocation.Validate(); err != nil { + invalidParams.AddNested("S3ExportLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportImageInput) SetClientToken(v string) *ExportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ExportImageInput) SetDescription(v string) *ExportImageInput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageInput) SetDiskImageFormat(v string) *ExportImageInput { + s.DiskImageFormat = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportImageInput) SetDryRun(v bool) *ExportImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageInput) SetImageId(v string) *ExportImageInput { + s.ImageId = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
+func (s *ExportImageInput) SetRoleName(v string) *ExportImageInput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) *ExportImageInput { + s.S3ExportLocation = v + return s +} + +type ExportImageOutput struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The disk image format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. + RoleName *string `locationName:"roleName" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. 
+func (s *ExportImageOutput) SetDescription(v string) *ExportImageOutput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageOutput) SetDiskImageFormat(v string) *ExportImageOutput { + s.DiskImageFormat = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageOutput) SetExportImageTaskId(v string) *ExportImageOutput { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageOutput) SetImageId(v string) *ExportImageOutput { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageOutput) SetProgress(v string) *ExportImageOutput { + s.Progress = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageOutput) SetRoleName(v string) *ExportImageOutput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageOutput) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageOutput { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageOutput) SetStatus(v string) *ExportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { + s.StatusMessage = &v + return s +} + +// Describes an export image task. +type ExportImageTask struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. 
+ Progress *string `locationName:"progress" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageTask) SetDescription(v string) *ExportImageTask { + s.Description = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageTask) SetExportImageTaskId(v string) *ExportImageTask { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageTask) SetImageId(v string) *ExportImageTask { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageTask) SetProgress(v string) *ExportImageTask { + s.Progress = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageTask) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageTask { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageTask) SetStatus(v string) *ExportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { + s.StatusMessage = &v + return s +} + // Describes an instance export task. 
type ExportTask struct { _ struct{} `type:"structure"` @@ -63104,6 +63887,87 @@ func (s *ExportTask) SetStatusMessage(v string) *ExportTask { return s } +// Describes the destination for an export image task. +type ExportTaskS3Location struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3Location) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3Location) SetS3Bucket(v string) *ExportTaskS3Location { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { + s.S3Prefix = &v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3LocationRequest struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3LocationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3LocationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ExportTaskS3LocationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTaskS3LocationRequest"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3LocationRequest) SetS3Bucket(v string) *ExportTaskS3LocationRequest { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3LocationRequest { + s.S3Prefix = &v + return s +} + // Describes the format and location for an instance export task. type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -64356,13 +65220,25 @@ func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { type GetCapacityReservationUsageInput struct { _ struct{} `type:"structure"` + // The ID of the Capacity Reservation. + // // CapacityReservationId is a required field CapacityReservationId *string `type:"string" required:"true"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. + // + // Valid range: Minimum value of 1. Maximum value of 1000. MaxResults *int64 `min:"1" type:"integer"` + // The token to retrieve the next page of results. 
NextToken *string `type:"string"` } @@ -64419,18 +65295,45 @@ func (s *GetCapacityReservationUsageInput) SetNextToken(v string) *GetCapacityRe type GetCapacityReservationUsageOutput struct { _ struct{} `type:"structure"` + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The ID of the Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // Information about the Capacity Reservation usage. InstanceUsages []*InstanceUsage `locationName:"instanceUsageSet" locationNameList:"item" type:"list"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. 
State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + // The number of instances for which the Capacity Reservation reserves capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -69240,8 +70143,8 @@ type InstanceNetworkInterfaceSpecification struct { // request. SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an instance. + // The ID of the subnet associated with the network interface. Applies only + // if creating a network interface when launching an instance. SubnetId *string `locationName:"subnetId" type:"string"` } @@ -69740,11 +70643,14 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { return s } +// Information about the Capacity Reservation usage. type InstanceUsage struct { _ struct{} `type:"structure"` + // The ID of the AWS account that is making use of the Capacity Reservation. AccountId *string `locationName:"accountId" type:"string"` + // The number of instances the AWS account currently has in the Capacity Reservation. UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` } @@ -70706,9 +71612,8 @@ type LaunchTemplateCpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. 
ThreadsPerCore *int64 `type:"integer"` } @@ -72369,7 +73274,7 @@ func (s *ModifyCapacityReservationInput) SetInstanceCount(v int64) *ModifyCapaci type ModifyCapacityReservationOutput struct { _ struct{} `type:"structure"` - // Information about the Capacity Reservation. + // Returns true if the request succeeds; otherwise, it returns an error. Return *bool `locationName:"return" type:"boolean"` } @@ -75389,8 +76294,7 @@ type ModifyVpcEndpointInput struct { DryRun *bool `type:"boolean"` // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. If this parameter is not specified, - // we attach a default policy that allows full access to the service. + // policy must be in valid JSON format. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicate whether a private hosted zone is associated @@ -75915,6 +76819,9 @@ func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput type ModifyVpnConnectionInput struct { _ struct{} `type:"structure"` + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -75956,6 +76863,12 @@ func (s *ModifyVpnConnectionInput) Validate() error { return nil } +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetCustomerGatewayId(v string) *ModifyVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + // SetDryRun sets the DryRun field's value. 
func (s *ModifyVpnConnectionInput) SetDryRun(v bool) *ModifyVpnConnectionInput { s.DryRun = &v @@ -76003,6 +76916,93 @@ func (s *ModifyVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *ModifyVp return s } +type ModifyVpnTunnelCertificateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnTunnelCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelCertificateInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelCertificateInput) SetDryRun(v bool) *ModifyVpnTunnelCertificateInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. 
+func (s *ModifyVpnTunnelCertificateInput) SetVpnConnectionId(v string) *ModifyVpnTunnelCertificateInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelCertificateInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelCertificateOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelCertificateOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelCertificateOutput { + s.VpnConnection = v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -80982,7 +81982,9 @@ type RequestSpotLaunchSpecification struct { // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` - // The ID of the subnet in which to launch the instance. + // The IDs of the subnets in which to launch the instance. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data for the instance. User data is limited to 16 @@ -84640,7 +85642,7 @@ type ScheduledInstancesEbs struct { // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. 
// - // Default: standard + // Default: gp2 VolumeType *string `type:"string"` } @@ -85165,7 +86167,7 @@ type SearchTransitGatewayRoutesInput struct { // // * state - The state of the route (active | blackhole). // - // * type - The type of roue (propagated | static). + // * type - The type of route (propagated | static). // // Filters is a required field Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` @@ -85426,6 +86428,70 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr return s } +type SendDiagnosticInterruptInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendDiagnosticInterruptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendDiagnosticInterruptInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SendDiagnosticInterruptInput) SetDryRun(v bool) *SendDiagnosticInterruptInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. 
+func (s *SendDiagnosticInterruptInput) SetInstanceId(v string) *SendDiagnosticInterruptInput { + s.InstanceId = &v + return s +} + +type SendDiagnosticInterruptOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptOutput) GoString() string { + return s.String() +} + // Describes a service configuration for a VPC endpoint service. type ServiceConfiguration struct { _ struct{} `type:"structure"` @@ -86378,7 +87444,7 @@ type SpotFleetLaunchSpecification struct { // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block devices that are mapped to the Spot instances. You can't + // One or more block devices that are mapped to the Spot Instances. You can't // specify both a snapshot ID and an encryption value. This is because only // blank volumes can be encrypted on creation. If a snapshot is the basis for // a volume, it is not blank and its encryption status is used for the volume @@ -86436,8 +87502,9 @@ type SpotFleetLaunchSpecification struct { // by the value of WeightedCapacity. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The ID of the subnet in which to launch the instances. To specify multiple - // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + // The IDs of the subnets in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The tags to apply during creation. 
@@ -86668,8 +87735,19 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the Spot Fleet request. + // + // If the allocation strategy is lowestPrice, Spot Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, Spot Fleet launches instances + // from all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, Spot Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` // A unique, case-sensitive identifier that you provide to ensure the idempotency @@ -87362,8 +88440,19 @@ func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { type SpotOptions struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowest-price. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. 
+ // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -87447,8 +88536,19 @@ func (s *SpotOptions) SetSingleInstanceType(v bool) *SpotOptions { type SpotOptionsRequest struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -91222,6 +92322,9 @@ type VgwTelemetry struct { // The number of accepted routes. AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + // The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The date and time of the last change in status. 
LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"` @@ -91252,6 +92355,12 @@ func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *VgwTelemetry) SetCertificateArn(v string) *VgwTelemetry { + s.CertificateArn = &v + return s +} + // SetLastStatusChange sets the LastStatusChange field's value. func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry { s.LastStatusChange = &v @@ -94333,6 +95442,9 @@ const ( // InstanceTypeI3en24xlarge is a InstanceType enum value InstanceTypeI3en24xlarge = "i3en.24xlarge" + // InstanceTypeI3enMetal is a InstanceType enum value + InstanceTypeI3enMetal = "i3en.metal" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index e403b84a442..31c314e0e5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -9,7 +9,7 @@ // // To learn more, see the following resources: // -// * Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon +// * Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon // EC2 documentation (http://aws.amazon.com/documentation/ec2) // // * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index 493f71971a6..b27725bc8b0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -768,12 +768,12 @@ func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Requ // When you delete a service, if there are still running tasks that require // cleanup, the service status moves from ACTIVE to DRAINING, and the 
service // is no longer visible in the console or in the ListServices API operation. -// After the tasks have stopped, then the service status moves from DRAINING -// to INACTIVE. Services in the DRAINING or INACTIVE status can still be viewed -// with the DescribeServices API operation. However, in the future, INACTIVE -// services may be cleaned up and purged from Amazon ECS record keeping, and -// DescribeServices calls on those services return a ServiceNotFoundException -// error. +// After all tasks have transitioned to either STOPPING or STOPPED status, the +// service status moves from DRAINING to INACTIVE. Services in the DRAINING +// or INACTIVE status can still be viewed with the DescribeServices API operation. +// However, in the future, INACTIVE services may be cleaned up and purged from +// Amazon ECS record keeping, and DescribeServices calls on those services return +// a ServiceNotFoundException error. // // If you attempt to create a new service with the same name as an existing // service in either ACTIVE or DRAINING status, you receive an error. @@ -3016,10 +3016,12 @@ func (c *ECS) PutAccountSettingRequest(input *PutAccountSettingInput) (req *requ // PutAccountSetting API operation for Amazon EC2 Container Service. // -// Modifies an account setting. If you change the account setting for the root -// user, the default settings for all of the IAM users and roles for which no -// individual account setting has been specified are reset. For more information, -// see Account Settings (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html) +// Modifies an account setting. Account settings are set on a per-Region basis. +// +// If you change the account setting for the root user, the default settings +// for all of the IAM users and roles for which no individual account setting +// has been specified are reset. 
For more information, see Account Settings +// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html) // in the Amazon Elastic Container Service Developer Guide. // // When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat @@ -3132,7 +3134,8 @@ func (c *ECS) PutAccountSettingDefaultRequest(input *PutAccountSettingDefaultInp // PutAccountSettingDefault API operation for Amazon EC2 Container Service. // // Modifies an account setting for all IAM users on an account for whom no individual -// account setting has been specified. +// account setting has been specified. Account settings are set on a per-Region +// basis. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5282,8 +5285,30 @@ type Cluster struct { // The metadata that you apply to the cluster to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. 
+ // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` } @@ -5800,12 +5825,6 @@ type ContainerDefinition struct { // The log configuration specification for the container. // - // For tasks using the Fargate launch type, the supported log drivers are awslogs - // and splunk. - // - // For tasks using the EC2 launch type, the supported log drivers are awslogs, - // syslog, gelf, fluentd, splunk, journald, and json-file. - // // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). @@ -6557,8 +6576,30 @@ type ContainerInstance struct { // The metadata that you apply to the container instance to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. 
+ // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The version counter for the container instance. Every time a container instance @@ -6873,8 +6914,30 @@ type CreateClusterInput struct { // The metadata that you apply to the cluster to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. 
Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` } @@ -7127,9 +7190,30 @@ type CreateServiceInput struct { // The metadata that you apply to the service to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. When a service is deleted, the tags are deleted as well. Tag keys - // can have a maximum character length of 128 characters, and tag values can - // have a maximum length of 256 characters. + // define. When a service is deleted, the tags are deleted as well. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -8687,8 +8771,30 @@ type DescribeTaskDefinitionOutput struct { // The metadata that is applied to the task definition to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. 
Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The full task definition description. @@ -9478,6 +9584,21 @@ type LinuxParameters struct { // command: sudo docker version --format '{{.Server.APIVersion}}' InitProcessEnabled *bool `locationName:"initProcessEnabled" type:"boolean"` + // The total amount of swap memory (in MiB) a container can use. This parameter + // will be translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/) + // where the value would be the sum of the container memory plus the maxSwap + // value. + // + // If a maxSwap value of 0 is specified, the container will not use swap. Accepted + // values are 0 or any positive integer. 
If the maxSwap parameter is omitted, + // the container will use the swap configuration for the container instance + // it is running on. A maxSwap value must be set for the swappiness parameter + // to be used. + // + // If you are using tasks that use the Fargate launch type, the maxSwap parameter + // is not supported. + MaxSwap *int64 `locationName:"maxSwap" type:"integer"` + // The value for the size (in MiB) of the /dev/shm volume. This parameter maps // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/). // @@ -9485,6 +9606,18 @@ type LinuxParameters struct { // parameter is not supported. SharedMemorySize *int64 `locationName:"sharedMemorySize" type:"integer"` + // This allows you to tune a container's memory swappiness behavior. A swappiness + // value of 0 will cause swapping to not happen unless absolutely necessary. + // A swappiness value of 100 will cause pages to be swapped very aggressively. + // Accepted values are whole numbers between 0 and 100. If the swappiness parameter + // is not specified, a default value of 60 is used. If a value is not specified + // for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness + // option to docker run (https://docs.docker.com/engine/reference/run/). + // + // If you are using tasks that use the Fargate launch type, the swappiness parameter + // is not supported. + Swappiness *int64 `locationName:"swappiness" type:"integer"` + // The container path, mount options, and size (in MiB) of the tmpfs mount. // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/). // @@ -9551,12 +9684,24 @@ func (s *LinuxParameters) SetInitProcessEnabled(v bool) *LinuxParameters { return s } +// SetMaxSwap sets the MaxSwap field's value. +func (s *LinuxParameters) SetMaxSwap(v int64) *LinuxParameters { + s.MaxSwap = &v + return s +} + // SetSharedMemorySize sets the SharedMemorySize field's value. 
func (s *LinuxParameters) SetSharedMemorySize(v int64) *LinuxParameters { s.SharedMemorySize = &v return s } +// SetSwappiness sets the Swappiness field's value. +func (s *LinuxParameters) SetSwappiness(v int64) *LinuxParameters { + s.Swappiness = &v + return s +} + // SetTmpfs sets the Tmpfs field's value. func (s *LinuxParameters) SetTmpfs(v []*Tmpfs) *LinuxParameters { s.Tmpfs = v @@ -10673,7 +10818,7 @@ type LogConfiguration struct { // and splunk. // // For tasks using the EC2 launch type, the supported log drivers are awslogs, - // syslog, gelf, fluentd, splunk, journald, and json-file. + // fluentd, gelf, json-file, journald, logentries, syslog, splunk, and syslog. // // For more information about using the awslogs log driver, see Using the awslogs // Log Driver (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) @@ -11566,8 +11711,30 @@ type RegisterContainerInstanceInput struct { // The metadata that you apply to the container instance to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. 
+ // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The resources available on the instance. @@ -11897,8 +12064,30 @@ type RegisterTaskDefinitionInput struct { // The metadata that you apply to the task definition to help you categorize // and organize them. Each tag consists of a key and an optional value, both - // of which you define. Tag keys can have a maximum character length of 128 - // characters, and tag values can have a maximum length of 256 characters. + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. 
Tags []*Tag `locationName:"tags" type:"list"` // The short name or full Amazon Resource Name (ARN) of the IAM role that containers @@ -12341,8 +12530,30 @@ type RunTaskInput struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -12634,7 +12845,9 @@ type Service struct { // deployment and the ordering of stopping and starting tasks. DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` - // The deployment controller type the service is using. + // The deployment controller type the service is using. 
When using the DescribeServices + // API, this field is omitted if the service is using the ECS deployment controller + // type. DeploymentController *DeploymentController `locationName:"deploymentController" type:"structure"` // The current state of deployments for the service. @@ -12745,8 +12958,30 @@ type Service struct { // The metadata that you apply to the service to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The task definition to use for tasks in the service. This value is specified @@ -13148,8 +13383,30 @@ type StartTaskInput struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. 
Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The family and revision (family:revision) or full ARN of the task definition @@ -13754,8 +14011,30 @@ func (s *SystemControl) SetValue(v string) *SystemControl { // The metadata that you apply to a resource to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you -// define. Tag keys can have a maximum character length of 128 characters, and -// tag values can have a maximum length of 256 characters. +// define. +// +// The following basic restrictions apply to tags: +// +// * Maximum number of tags per resource - 50 +// +// * For each resource, each tag key must be unique, and each tag key can +// have only one value. 
+// +// * Maximum key length - 128 Unicode characters in UTF-8 +// +// * Maximum value length - 256 Unicode characters in UTF-8 +// +// * If your tagging schema is used across multiple services and resources, +// remember that other services may have restrictions on allowed characters. +// Generally allowed characters are: letters, numbers, and spaces representable +// in UTF-8, and the following characters: + - = . _ : / @. +// +// * Tag keys and values are case-sensitive. +// +// * Do not use aws:, AWS:, or any upper or lowercase combination of such +// as a prefix for either keys or values as it is reserved for AWS use. You +// cannot edit or delete tag keys or values with this prefix. Tags with this +// prefix do not count against your tags per resource limit. type Tag struct { _ struct{} `type:"structure"` @@ -13813,9 +14092,30 @@ type TagResourceInput struct { // ResourceArn is a required field ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` - // The tags to add to the resource. A tag is an array of key-value pairs. Tag - // keys can have a maximum character length of 128 characters, and tag values - // can have a maximum length of 256 characters. + // The tags to add to the resource. A tag is an array of key-value pairs. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. 
+ // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. // // Tags is a required field Tags []*Tag `locationName:"tags" type:"list" required:"true"` @@ -14041,8 +14341,30 @@ type Task struct { // The metadata that you apply to the task to help you categorize and organize // them. Each tag consists of a key and an optional value, both of which you - // define. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50 + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8 + // + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case-sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this + // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` // The Amazon Resource Name (ARN) of the task. @@ -14439,9 +14761,10 @@ type TaskDefinition struct { // The full Amazon Resource Name (ARN) of the task definition. 
TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` - // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) - // role that grants containers in the task permission to call AWS APIs on your - // behalf. For more information, see Amazon ECS Task Role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_IAM_role.html) + // The short name or full Amazon Resource Name (ARN) of the AWS Identity and + // Access Management (IAM) role that grants containers in the task permission + // to call AWS APIs on your behalf. For more information, see Amazon ECS Task + // Role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_IAM_role.html) // in the Amazon Elastic Container Service Developer Guide. // // IAM roles for tasks on Windows require that the -EnableTaskIAMRole option @@ -14771,10 +15094,7 @@ type TaskSet struct { // * There are no tasks running on container instances in the DRAINING status. // // * All tasks are reporting a healthy status from the load balancers, service - // discovery, and container health checks. If a healthCheckGracePeriodSeconds - // value was set when the service was created, you may see a STEADY_STATE - // reached since unhealthy Elastic Load Balancing target health checks will - // be ignored until it expires. + // discovery, and container health checks. // // If any of those conditions are not met, the stability status returns STABILIZING. 
StabilityStatus *string `locationName:"stabilityStatus" type:"string" enum:"StabilityStatus"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index 8ed98cc1ca6..7ac1a627e15 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -10638,6 +10638,14 @@ func (s *ListAllowedNodeTypeModificationsInput) SetReplicationGroupId(v string) type ListAllowedNodeTypeModificationsOutput struct { _ struct{} `type:"structure"` + // A string list, each element of which specifies a cache node type which you + // can use to scale your cluster or replication group. + // + // When scaling down on a Redis cluster or replication group using ModifyCacheCluster + // or ModifyReplicationGroup, use a value from this list for the CacheNodeType + // parameter. + ScaleDownModifications []*string `type:"list"` + // A string list, each element of which specifies a cache node type which you // can use to scale your cluster or replication group. // @@ -10657,6 +10665,12 @@ func (s ListAllowedNodeTypeModificationsOutput) GoString() string { return s.String() } +// SetScaleDownModifications sets the ScaleDownModifications field's value. +func (s *ListAllowedNodeTypeModificationsOutput) SetScaleDownModifications(v []*string) *ListAllowedNodeTypeModificationsOutput { + s.ScaleDownModifications = v + return s +} + // SetScaleUpModifications sets the ScaleUpModifications field's value. 
func (s *ListAllowedNodeTypeModificationsOutput) SetScaleUpModifications(v []*string) *ListAllowedNodeTypeModificationsOutput { s.ScaleUpModifications = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index fd631c06928..ce35d389aee 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -974,6 +974,91 @@ func (c *EMR) DescribeStepWithContext(ctx aws.Context, input *DescribeStepInput, return out, req.Send() } +const opGetBlockPublicAccessConfiguration = "GetBlockPublicAccessConfiguration" + +// GetBlockPublicAccessConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBlockPublicAccessConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBlockPublicAccessConfiguration for more information on using the GetBlockPublicAccessConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBlockPublicAccessConfigurationRequest method. 
+// req, resp := client.GetBlockPublicAccessConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetBlockPublicAccessConfiguration +func (c *EMR) GetBlockPublicAccessConfigurationRequest(input *GetBlockPublicAccessConfigurationInput) (req *request.Request, output *GetBlockPublicAccessConfigurationOutput) { + op := &request.Operation{ + Name: opGetBlockPublicAccessConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetBlockPublicAccessConfigurationInput{} + } + + output = &GetBlockPublicAccessConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. +// +// Returns the Amazon EMR block public access configuration for your AWS account +// in the current Region. For more information see Configure Block Public Access +// for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) +// in the Amazon EMR Management Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation GetBlockPublicAccessConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// This exception occurs when there is an internal failure in the EMR service. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception occurs when there is something wrong with user input. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetBlockPublicAccessConfiguration +func (c *EMR) GetBlockPublicAccessConfiguration(input *GetBlockPublicAccessConfigurationInput) (*GetBlockPublicAccessConfigurationOutput, error) { + req, out := c.GetBlockPublicAccessConfigurationRequest(input) + return out, req.Send() +} + +// GetBlockPublicAccessConfigurationWithContext is the same as GetBlockPublicAccessConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBlockPublicAccessConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) GetBlockPublicAccessConfigurationWithContext(ctx aws.Context, input *GetBlockPublicAccessConfigurationInput, opts ...request.Option) (*GetBlockPublicAccessConfigurationOutput, error) { + req, out := c.GetBlockPublicAccessConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBootstrapActions = "ListBootstrapActions" // ListBootstrapActionsRequest generates a "aws/request.Request" representing the @@ -2203,6 +2288,92 @@ func (c *EMR) PutAutoScalingPolicyWithContext(ctx aws.Context, input *PutAutoSca return out, req.Send() } +const opPutBlockPublicAccessConfiguration = "PutBlockPublicAccessConfiguration" + +// PutBlockPublicAccessConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBlockPublicAccessConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBlockPublicAccessConfiguration for more information on using the PutBlockPublicAccessConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBlockPublicAccessConfigurationRequest method. +// req, resp := client.PutBlockPublicAccessConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutBlockPublicAccessConfiguration +func (c *EMR) PutBlockPublicAccessConfigurationRequest(input *PutBlockPublicAccessConfigurationInput) (req *request.Request, output *PutBlockPublicAccessConfigurationOutput) { + op := &request.Operation{ + Name: opPutBlockPublicAccessConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutBlockPublicAccessConfigurationInput{} + } + + output = &PutBlockPublicAccessConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBlockPublicAccessConfiguration API operation for Amazon Elastic MapReduce. +// +// Creates or updates an Amazon EMR block public access configuration for your +// AWS account in the current Region. For more information see Configure Block +// Public Access for Amazon EMR (https://docs.aws.amazon.com/emr/latest/ManagementGuide/configure-block-public-access.html) +// in the Amazon EMR Management Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation PutBlockPublicAccessConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerException "InternalServerException" +// This exception occurs when there is an internal failure in the EMR service. +// +// * ErrCodeInvalidRequestException "InvalidRequestException" +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutBlockPublicAccessConfiguration +func (c *EMR) PutBlockPublicAccessConfiguration(input *PutBlockPublicAccessConfigurationInput) (*PutBlockPublicAccessConfigurationOutput, error) { + req, out := c.PutBlockPublicAccessConfigurationRequest(input) + return out, req.Send() +} + +// PutBlockPublicAccessConfigurationWithContext is the same as PutBlockPublicAccessConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBlockPublicAccessConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) PutBlockPublicAccessConfigurationWithContext(ctx aws.Context, input *PutBlockPublicAccessConfigurationInput, opts ...request.Option) (*PutBlockPublicAccessConfigurationOutput, error) { + req, out := c.PutBlockPublicAccessConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opRemoveAutoScalingPolicy = "RemoveAutoScalingPolicy" // RemoveAutoScalingPolicyRequest generates a "aws/request.Request" representing the @@ -2614,6 +2785,8 @@ func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req // SetVisibleToAllUsers API operation for Amazon Elastic MapReduce. // +// This member will be deprecated. +// // Sets whether all AWS Identity and Access Management (IAM) users under your // account can access the specified clusters (job flows). This action works // on running clusters. You can also set the visibility of a cluster when you @@ -3341,6 +3514,117 @@ func (s *AutoScalingPolicyStatus) SetStateChangeReason(v *AutoScalingPolicyState return s } +// A configuration for Amazon EMR block public access. When BlockPublicSecurityGroupRules +// is set to true, Amazon EMR prevents cluster creation if one of the cluster's +// security groups has a rule that allows inbound traffic from 0.0.0.0/0 or +// ::/0 on a port, unless the port is specified as an exception using PermittedPublicSecurityGroupRuleRanges. +type BlockPublicAccessConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether EMR block public access is enabled (true) or disabled (false). + // By default, the value is false for accounts that have created EMR clusters + // before July 2019. For accounts created after this, the default is true. + // + // BlockPublicSecurityGroupRules is a required field + BlockPublicSecurityGroupRules *bool `type:"boolean" required:"true"` + + // Specifies ports and port ranges that are permitted to have security group + // rules that allow inbound traffic from all public sources. For example, if + // Port 23 (Telnet) is specified for PermittedPublicSecurityGroupRuleRanges, + // Amazon EMR allows cluster creation if a security group associated with the + // cluster has a rule that allows inbound traffic on Port 23 from IPv4 0.0.0.0/0 + // or IPv6 port ::/0 as the source. 
+ // + // By default, Port 22, which is used for SSH access to the cluster EC2 instances, + // is in the list of PermittedPublicSecurityGroupRuleRanges. + PermittedPublicSecurityGroupRuleRanges []*PortRange `type:"list"` +} + +// String returns the string representation +func (s BlockPublicAccessConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockPublicAccessConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BlockPublicAccessConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BlockPublicAccessConfiguration"} + if s.BlockPublicSecurityGroupRules == nil { + invalidParams.Add(request.NewErrParamRequired("BlockPublicSecurityGroupRules")) + } + if s.PermittedPublicSecurityGroupRuleRanges != nil { + for i, v := range s.PermittedPublicSecurityGroupRuleRanges { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PermittedPublicSecurityGroupRuleRanges", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockPublicSecurityGroupRules sets the BlockPublicSecurityGroupRules field's value. +func (s *BlockPublicAccessConfiguration) SetBlockPublicSecurityGroupRules(v bool) *BlockPublicAccessConfiguration { + s.BlockPublicSecurityGroupRules = &v + return s +} + +// SetPermittedPublicSecurityGroupRuleRanges sets the PermittedPublicSecurityGroupRuleRanges field's value. 
+func (s *BlockPublicAccessConfiguration) SetPermittedPublicSecurityGroupRuleRanges(v []*PortRange) *BlockPublicAccessConfiguration { + s.PermittedPublicSecurityGroupRuleRanges = v + return s +} + +// Properties that describe the AWS principal that created the BlockPublicAccessConfiguration +// using the PutBlockPublicAccessConfiguration action as well as the date and +// time that the configuration was created. Each time a configuration for block +// public access is updated, Amazon EMR updates this metadata. +type BlockPublicAccessConfigurationMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name that created or last modified the configuration. + // + // CreatedByArn is a required field + CreatedByArn *string `min:"20" type:"string" required:"true"` + + // The date and time that the configuration was created. + // + // CreationDateTime is a required field + CreationDateTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s BlockPublicAccessConfigurationMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BlockPublicAccessConfigurationMetadata) GoString() string { + return s.String() +} + +// SetCreatedByArn sets the CreatedByArn field's value. +func (s *BlockPublicAccessConfigurationMetadata) SetCreatedByArn(v string) *BlockPublicAccessConfigurationMetadata { + s.CreatedByArn = &v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *BlockPublicAccessConfigurationMetadata) SetCreationDateTime(v time.Time) *BlockPublicAccessConfigurationMetadata { + s.CreationDateTime = &v + return s +} + // Configuration of a bootstrap action. type BootstrapActionConfig struct { _ struct{} `type:"structure"` @@ -3731,11 +4015,11 @@ type Cluster struct { // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. 
Release labels are in the - // form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, - // emr-5.14.0. For more information about Amazon EMR release versions and included - // application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ + // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. + // For more information about Amazon EMR release versions and included application + // versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ // (https://docs.aws.amazon.com/emr/latest/ReleaseGuide/). The release label - // applies only to Amazon EMR releases versions 4.x and later. Earlier versions + // applies only to Amazon EMR releases version 4.0 and later. Earlier versions // use AmiVersion. ReleaseLabel *string `type:"string"` @@ -3782,6 +4066,8 @@ type Cluster struct { // of a cluster error. TerminationProtected *bool `type:"boolean"` + // This member will be deprecated. + // // Indicates whether the cluster is visible to all IAM users of the AWS account // associated with the cluster. If this value is set to true, all IAM users // of that AWS account can view and manage the cluster if they have the proper @@ -4849,14 +5135,9 @@ type Ec2InstanceAttributes struct { // the master node as a user named "hadoop". Ec2KeyName *string `type:"string"` - // To launch the cluster in Amazon VPC, set this parameter to the identifier - // of the Amazon VPC subnet where you want the cluster to launch. If you do - // not specify this value, the cluster is launched in the normal AWS cloud, - // outside of a VPC. - // - // Amazon VPC currently does not support cluster compute quadruple extra large - // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance - // type for nodes of a cluster launched in a VPC. + // Set this parameter to the identifier of the Amazon VPC subnet where you want + // the cluster to launch. 
If you do not specify this value, and your account + // supports EC2-Classic, the cluster launches in EC2-Classic. Ec2SubnetId *string `type:"string"` // The identifier of the Amazon EC2 security group for the master node. @@ -4884,7 +5165,7 @@ type Ec2InstanceAttributes struct { // EC2 cluster instances. Subnets must exist within the same VPC. Amazon EMR // chooses the EC2 subnet with the best fit from among the list of RequestedEc2SubnetIds, // and then launches all cluster instances within that Subnet. If this value - // is not specified, and the account and region support EC2-Classic networks, + // is not specified, and the account and Region support EC2-Classic networks, // the cluster launches instances in the EC2-Classic network and uses RequestedEc2AvailabilityZones // instead of this setting. If EC2-Classic is not supported, and no Subnet is // specified, Amazon EMR chooses the subnet for you. RequestedEc2SubnetIDs and @@ -5020,6 +5301,67 @@ func (s *FailureDetails) SetReason(v string) *FailureDetails { return s } +type GetBlockPublicAccessConfigurationInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetBlockPublicAccessConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockPublicAccessConfigurationInput) GoString() string { + return s.String() +} + +type GetBlockPublicAccessConfigurationOutput struct { + _ struct{} `type:"structure"` + + // A configuration for Amazon EMR block public access. The configuration applies + // to all clusters created in your account for the current Region. The configuration + // specifies whether block public access is enabled. 
If block public access + // is enabled, security groups associated with the cluster cannot have rules + // that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port + // is specified as an exception using PermittedPublicSecurityGroupRuleRanges + // in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, + // and public access is allowed on this port. You can change this by updating + // the block public access configuration to remove the exception. + // + // BlockPublicAccessConfiguration is a required field + BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` + + // Properties that describe the AWS principal that created the BlockPublicAccessConfiguration + // using the PutBlockPublicAccessConfiguration action as well as the date and + // time that the configuration was created. Each time a configuration for block + // public access is updated, Amazon EMR updates this metadata. + // + // BlockPublicAccessConfigurationMetadata is a required field + BlockPublicAccessConfigurationMetadata *BlockPublicAccessConfigurationMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetBlockPublicAccessConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBlockPublicAccessConfigurationOutput) GoString() string { + return s.String() +} + +// SetBlockPublicAccessConfiguration sets the BlockPublicAccessConfiguration field's value. +func (s *GetBlockPublicAccessConfigurationOutput) SetBlockPublicAccessConfiguration(v *BlockPublicAccessConfiguration) *GetBlockPublicAccessConfigurationOutput { + s.BlockPublicAccessConfiguration = v + return s +} + +// SetBlockPublicAccessConfigurationMetadata sets the BlockPublicAccessConfigurationMetadata field's value. 
+func (s *GetBlockPublicAccessConfigurationOutput) SetBlockPublicAccessConfigurationMetadata(v *BlockPublicAccessConfigurationMetadata) *GetBlockPublicAccessConfigurationOutput { + s.BlockPublicAccessConfigurationMetadata = v + return s +} + // A job flow step consisting of a JAR file whose main function will be executed. // The main function submits a job for Hadoop to execute and waits for the job // to finish or fail. @@ -6932,6 +7274,8 @@ type JobFlowDetail struct { // is empty. SupportedProducts []*string `type:"list"` + // This member will be deprecated. + // // Specifies whether the cluster is visible to all IAM users of the AWS account // associated with the cluster. If this value is set to true, all IAM users // of that AWS account can view and (if they have the proper policy permissions @@ -7131,14 +7475,8 @@ type JobFlowInstancesConfig struct { // Applies to clusters that use the uniform instance group configuration. To // launch the cluster in Amazon Virtual Private Cloud (Amazon VPC), set this // parameter to the identifier of the Amazon VPC subnet where you want the cluster - // to launch. If you do not specify this value, the cluster launches in the - // normal Amazon Web Services cloud, outside of an Amazon VPC, if the account - // launching the cluster supports EC2 Classic networks in the region where the - // cluster launches. - // - // Amazon VPC currently does not support cluster compute quadruple extra large - // (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance - // type for clusters launched in an Amazon VPC. + // to launch. If you do not specify this value and your account supports EC2-Classic, + // the cluster launches in EC2-Classic. Ec2SubnetId *string `type:"string"` // Applies to clusters that use the instance fleet configuration. 
When multiple @@ -8450,6 +8788,56 @@ func (s *PlacementType) SetAvailabilityZones(v []*string) *PlacementType { return s } +// A list of port ranges that are permitted to allow inbound traffic from all +// public IP addresses. To specify a single port, use the same value for MinRange +// and MaxRange. +type PortRange struct { + _ struct{} `type:"structure"` + + // The smallest port number in a specified range of port numbers. + MaxRange *int64 `type:"integer"` + + // The smallest port number in a specified range of port numbers. + // + // MinRange is a required field + MinRange *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s PortRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PortRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PortRange"} + if s.MinRange == nil { + invalidParams.Add(request.NewErrParamRequired("MinRange")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxRange sets the MaxRange field's value. +func (s *PortRange) SetMaxRange(v int64) *PortRange { + s.MaxRange = &v + return s +} + +// SetMinRange sets the MinRange field's value. +func (s *PortRange) SetMinRange(v int64) *PortRange { + s.MinRange = &v + return s +} + type PutAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -8565,6 +8953,71 @@ func (s *PutAutoScalingPolicyOutput) SetInstanceGroupId(v string) *PutAutoScalin return s } +type PutBlockPublicAccessConfigurationInput struct { + _ struct{} `type:"structure"` + + // A configuration for Amazon EMR block public access. The configuration applies + // to all clusters created in your account for the current Region. The configuration + // specifies whether block public access is enabled. 
If block public access + // is enabled, security groups associated with the cluster cannot have rules + // that allow inbound traffic from 0.0.0.0/0 or ::/0 on a port, unless the port + // is specified as an exception using PermittedPublicSecurityGroupRuleRanges + // in the BlockPublicAccessConfiguration. By default, Port 22 (SSH) is an exception, + // and public access is allowed on this port. You can change this by updating + // BlockPublicSecurityGroupRules to remove the exception. + // + // BlockPublicAccessConfiguration is a required field + BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBlockPublicAccessConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBlockPublicAccessConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBlockPublicAccessConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBlockPublicAccessConfigurationInput"} + if s.BlockPublicAccessConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("BlockPublicAccessConfiguration")) + } + if s.BlockPublicAccessConfiguration != nil { + if err := s.BlockPublicAccessConfiguration.Validate(); err != nil { + invalidParams.AddNested("BlockPublicAccessConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlockPublicAccessConfiguration sets the BlockPublicAccessConfiguration field's value. 
+func (s *PutBlockPublicAccessConfigurationInput) SetBlockPublicAccessConfiguration(v *BlockPublicAccessConfiguration) *PutBlockPublicAccessConfigurationInput { + s.BlockPublicAccessConfiguration = v + return s +} + +type PutBlockPublicAccessConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBlockPublicAccessConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBlockPublicAccessConfigurationOutput) GoString() string { + return s.String() +} + type RemoveAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -8805,11 +9258,11 @@ type RunJobFlowInput struct { // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. Release labels are in the - // form emr-x.x.x, where x.x.x is an Amazon EMR release version, for example, - // emr-5.14.0. For more information about Amazon EMR release versions and included - // application versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ + // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. + // For more information about Amazon EMR release versions and included application + // versions and features, see https://docs.aws.amazon.com/emr/latest/ReleaseGuide/ // (https://docs.aws.amazon.com/emr/latest/ReleaseGuide/). The release label - // applies only to Amazon EMR releases versions 4.x and later. Earlier versions + // applies only to Amazon EMR releases version 4.0 and later. Earlier versions // use AmiVersion. ReleaseLabel *string `type:"string"` @@ -8860,6 +9313,8 @@ type RunJobFlowInput struct { // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags []*Tag `type:"list"` + // This member will be deprecated. + // // Whether the cluster is visible to all IAM users of the AWS account associated // with the cluster. 
If this value is set to true, all IAM users of that AWS // account can view and (if they have the proper policy permissions set) manage @@ -9489,6 +9944,8 @@ func (s SetTerminationProtectionOutput) GoString() string { return s.String() } +// This member will be deprecated. +// // The input to the SetVisibleToAllUsers action. type SetVisibleToAllUsersInput struct { _ struct{} `type:"structure"` @@ -9498,6 +9955,8 @@ type SetVisibleToAllUsersInput struct { // JobFlowIds is a required field JobFlowIds []*string `type:"list" required:"true"` + // This member will be deprecated. + // // Whether the specified clusters are visible to all IAM users of the AWS account // associated with the cluster. If this value is set to True, all IAM users // of that AWS account can view and, if they have the proper IAM policy permissions diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go b/vendor/github.com/aws/aws-sdk-go/service/glue/api.go index b86fc1012f0..92eb5a86dc5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/api.go @@ -5568,151 +5568,6 @@ func (c *Glue) GetJobBookmarkWithContext(ctx aws.Context, input *GetJobBookmarkI return out, req.Send() } -const opGetJobBookmarks = "GetJobBookmarks" - -// GetJobBookmarksRequest generates a "aws/request.Request" representing the -// client's request for the GetJobBookmarks operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetJobBookmarks for more information on using the GetJobBookmarks -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the GetJobBookmarksRequest method. -// req, resp := client.GetJobBookmarksRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobBookmarks -func (c *Glue) GetJobBookmarksRequest(input *GetJobBookmarksInput) (req *request.Request, output *GetJobBookmarksOutput) { - op := &request.Operation{ - Name: opGetJobBookmarks, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetJobBookmarksInput{} - } - - output = &GetJobBookmarksOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetJobBookmarks API operation for AWS Glue. -// -// Returns information on the job bookmark entries. The list is ordered on decreasing -// version numbers. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Glue's -// API operation GetJobBookmarks for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidInputException "InvalidInputException" -// The input provided was not valid. -// -// * ErrCodeEntityNotFoundException "EntityNotFoundException" -// A specified entity does not exist -// -// * ErrCodeInternalServiceException "InternalServiceException" -// An internal service error occurred. -// -// * ErrCodeOperationTimeoutException "OperationTimeoutException" -// The operation timed out. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetJobBookmarks -func (c *Glue) GetJobBookmarks(input *GetJobBookmarksInput) (*GetJobBookmarksOutput, error) { - req, out := c.GetJobBookmarksRequest(input) - return out, req.Send() -} - -// GetJobBookmarksWithContext is the same as GetJobBookmarks with the addition of -// the ability to pass a context and additional request options. -// -// See GetJobBookmarks for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobBookmarksWithContext(ctx aws.Context, input *GetJobBookmarksInput, opts ...request.Option) (*GetJobBookmarksOutput, error) { - req, out := c.GetJobBookmarksRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetJobBookmarksPages iterates over the pages of a GetJobBookmarks operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetJobBookmarks method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetJobBookmarks operation. 
-// pageNum := 0 -// err := client.GetJobBookmarksPages(params, -// func(page *glue.GetJobBookmarksOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Glue) GetJobBookmarksPages(input *GetJobBookmarksInput, fn func(*GetJobBookmarksOutput, bool) bool) error { - return c.GetJobBookmarksPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetJobBookmarksPagesWithContext same as GetJobBookmarksPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Glue) GetJobBookmarksPagesWithContext(ctx aws.Context, input *GetJobBookmarksInput, fn func(*GetJobBookmarksOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetJobBookmarksInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetJobBookmarksRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*GetJobBookmarksOutput), !p.HasNextPage()) - } - return p.Err() -} - const opGetJobRun = "GetJobRun" // GetJobRunRequest generates a "aws/request.Request" representing the @@ -21176,96 +21031,6 @@ func (s *GetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *GetJobB return s } -type GetJobBookmarksInput struct { - _ struct{} `type:"structure"` - - // The name of the job in question. - // - // JobName is a required field - JobName *string `type:"string" required:"true"` - - // The maximum size of the response. - MaxResults *int64 `type:"integer"` - - // A continuation token, if this is a continuation call. 
- NextToken *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetJobBookmarksInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobBookmarksInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetJobBookmarksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetJobBookmarksInput"} - if s.JobName == nil { - invalidParams.Add(request.NewErrParamRequired("JobName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetJobName sets the JobName field's value. -func (s *GetJobBookmarksInput) SetJobName(v string) *GetJobBookmarksInput { - s.JobName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetJobBookmarksInput) SetMaxResults(v int64) *GetJobBookmarksInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetJobBookmarksInput) SetNextToken(v int64) *GetJobBookmarksInput { - s.NextToken = &v - return s -} - -type GetJobBookmarksOutput struct { - _ struct{} `type:"structure"` - - // A list of job bookmark entries that defines a point that a job can resume - // processing. - JobBookmarkEntries []*JobBookmarkEntry `type:"list"` - - // A continuation token, which has a value of 1 if all the entries are returned, - // or > 1 if not all requested job runs have been returned. - NextToken *int64 `type:"integer"` -} - -// String returns the string representation -func (s GetJobBookmarksOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetJobBookmarksOutput) GoString() string { - return s.String() -} - -// SetJobBookmarkEntries sets the JobBookmarkEntries field's value. 
-func (s *GetJobBookmarksOutput) SetJobBookmarkEntries(v []*JobBookmarkEntry) *GetJobBookmarksOutput { - s.JobBookmarkEntries = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *GetJobBookmarksOutput) SetNextToken(v int64) *GetJobBookmarksOutput { - s.NextToken = &v - return s -} - type GetJobInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go index f618f0da698..0ab636735ef 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/customizations.go @@ -9,14 +9,14 @@ import ( var readDuration = 5 * time.Second func init() { - ops := []string{ - opGetRecords, - } - initRequest = func(r *request.Request) { - for _, operation := range ops { - if r.Operation.Name == operation { - r.ApplyOptions(request.WithResponseReadTimeout(readDuration)) - } - } + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + if r.Operation.Name == opGetRecords { + r.ApplyOptions(request.WithResponseReadTimeout(readDuration)) } + + // Service specific error codes. Github(aws/aws-sdk-go#1376) + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeLimitExceededException) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index 904a1f3dbe0..637052f4211 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -13854,8 +13854,22 @@ type CreateDBClusterInput struct { // in the Amazon Aurora User Guide. EnableCloudwatchLogsExports []*string `type:"list"` + // A value that indicates whether to enable the HTTP endpoint for an Aurora + // Serverless DB cluster. By default, the HTTP endpoint is disabled. 
+ // + // When enabled, the HTTP endpoint provides a connectionless web service API + // for running SQL queries on the Aurora Serverless DB cluster. You can also + // query your database from inside the RDS console with the query editor. + // + // For more information, see Using the Data API for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) + // in the Amazon Aurora User Guide. + EnableHttpEndpoint *bool `type:"boolean"` + // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for this DB cluster. @@ -13872,13 +13886,28 @@ type CreateDBClusterInput struct { // The version number of the database engine to use. 
// + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // // Aurora MySQL // - // Example: 5.6.10a, 5.7.12 + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 // // Aurora PostgreSQL // - // Example: 9.6.3 + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The global cluster ID of an Aurora cluster that becomes the primary cluster @@ -14125,6 +14154,12 @@ func (s *CreateDBClusterInput) SetEnableCloudwatchLogsExports(v []*string) *Crea return s } +// SetEnableHttpEndpoint sets the EnableHttpEndpoint field's value. +func (s *CreateDBClusterInput) SetEnableHttpEndpoint(v bool) *CreateDBClusterInput { + s.EnableHttpEndpoint = &v + return s +} + // SetEnableIAMDatabaseAuthentication sets the EnableIAMDatabaseAuthentication field's value. 
func (s *CreateDBClusterInput) SetEnableIAMDatabaseAuthentication(v bool) *CreateDBClusterInput { s.EnableIAMDatabaseAuthentication = &v @@ -14771,6 +14806,20 @@ type CreateDBInstanceInput struct { // * For MySQL 5.6, minor version 5.6.34 or higher // // * For MySQL 5.7, minor version 5.7.16 or higher + // + // * For MySQL 8.0, minor version 8.0.16 or higher + // + // PostgreSQL + // + // * For PostgreSQL 9.5, minor version 9.5.15 or higher + // + // * For PostgreSQL 9.6, minor version 9.6.11 or higher + // + // * PostgreSQL 10.6, 10.7, and 10.9 + // + // For more information, see IAM Database Authentication for MySQL and PostgreSQL + // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // A value that indicates whether to enable Performance Insights for the DB @@ -14858,8 +14907,7 @@ type CreateDBInstanceInput struct { // The amount of Provisioned IOPS (input/output operations per second) to be // initially allocated for the DB instance. For information about valid Iops - // values, see see Amazon RDS Provisioned IOPS Storage to Improve Performance - // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + // values, see Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) // in the Amazon RDS User Guide. // // Constraints: Must be a multiple between 1 and 50 of the storage amount for @@ -15611,14 +15659,11 @@ type CreateDBInstanceReadReplicaInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. 
// - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher - // - // * Aurora MySQL 5.6 or higher + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // A value that indicates whether to enable Performance Insights for the Read @@ -21660,7 +21705,18 @@ type DescribeDBClusterSnapshotsInput struct { // must also be specified. DBClusterSnapshotIdentifier *string `type:"string"` - // This parameter is not currently supported. + // A filter that specifies one or more DB cluster snapshots to describe. + // + // Supported filters: + // + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). + // + // * db-cluster-snapshot-id - Accepts DB cluster snapshot identifiers. + // + // * snapshot-type - Accepts types of DB cluster snapshots. + // + // * engine - Accepts names of database engines. Filters []*Filter `locationNameList:"Filter" type:"list"` // A value that indicates whether to include manual DB cluster snapshots that @@ -22317,6 +22373,10 @@ type DescribeDBInstancesInput struct { // * db-instance-id - Accepts DB instance identifiers and DB instance Amazon // Resource Names (ARNs). The results list will only include information // about the DB instances identified by these ARNs. + // + // * dbi-resource-id - Accepts DB instance resource identifiers. The results + // list will only include information about the DB instances identified by + // these resource identifiers. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBInstances request. 
@@ -23073,7 +23133,20 @@ type DescribeDBSnapshotsInput struct { // A specific DB resource ID to describe. DbiResourceId *string `type:"string"` - // This parameter is not currently supported. + // A filter that specifies one or more DB snapshots to describe. + // + // Supported filters: + // + // * db-instance-id - Accepts DB instance identifiers and DB instance Amazon + // Resource Names (ARNs). + // + // * db-snapshot-id - Accepts DB snapshot identifiers. + // + // * dbi-resource-id - Accepts identifiers of source DB instances. + // + // * snapshot-type - Accepts types of DB snapshots. + // + // * engine - Accepts names of database engines. Filters []*Filter `locationNameList:"Filter" type:"list"` // A value that indicates whether to include manual DB cluster snapshots that @@ -26672,13 +26745,29 @@ type ModifyDBClusterInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The version number of the database engine to which you want to upgrade. Changing // this parameter results in an outage. The change is applied during the next // maintenance window unless ApplyImmediately is enabled. // - // For a list of valid engine versions, use DescribeDBEngineVersions. 
+ // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" EngineVersion *string `type:"string"` // The new password for the master database user. This password can contain @@ -27324,19 +27413,11 @@ type ModifyDBInstanceInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // You can enable IAM database authentication for the following database engines - // - // Amazon Aurora - // - // Not applicable. Mapping AWS IAM accounts to database accounts is managed - // by the DB cluster. For more information, see ModifyDBCluster. - // - // MySQL - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. 
EnableIAMDatabaseAuthentication *bool `type:"boolean"` // A value that indicates whether to enable Performance Insights for the DB @@ -29681,8 +29762,7 @@ type PendingMaintenanceAction struct { // The date of the maintenance window when the action is applied. The maintenance // action is applied to the resource during its first maintenance window after - // this date. If this date is specified, any next-maintenance opt-in requests - // are ignored. + // this date. AutoAppliedAfterDate *time.Time `type:"timestamp"` // The effective date when the pending maintenance action is applied to the @@ -29697,8 +29777,7 @@ type PendingMaintenanceAction struct { // The date when the maintenance action is automatically applied. The maintenance // action is applied to the resource on this date regardless of the maintenance - // window for the resource. If this date is specified, any immediate opt-in - // requests are ignored. + // window for the resource. ForcedApplyDate *time.Time `type:"timestamp"` // Indicates the type of opt-in request that has been received for the resource. @@ -31295,6 +31374,9 @@ type RestoreDBClusterFromS3Input struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The name of the database engine to be used for the restored DB cluster. @@ -31306,13 +31388,28 @@ type RestoreDBClusterFromS3Input struct { // The version number of the database engine to use. 
// + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // // Aurora MySQL // - // Example: 5.6.10a + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 // // Aurora PostgreSQL // - // Example: 9.6.3 + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The AWS KMS key identifier for an encrypted DB cluster. @@ -31765,6 +31862,9 @@ type RestoreDBClusterFromSnapshotInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new DB cluster. @@ -31781,6 +31881,29 @@ type RestoreDBClusterFromSnapshotInput struct { EngineMode *string `type:"string"` // The version of the database engine to use for the new DB cluster. 
+ // + // To list all of the available engine versions for aurora (for MySQL 5.6-compatible + // Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-mysql (for MySQL + // 5.7-compatible Aurora), use the following command: + // + // aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" + // + // To list all of the available engine versions for aurora-postgresql, use the + // following command: + // + // aws rds describe-db-engine-versions --engine aurora-postgresql --query "DBEngineVersions[].EngineVersion" + // + // Aurora MySQL + // + // Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 + // + // Aurora PostgreSQL + // + // Example: 9.6.3, 10.7 EngineVersion *string `type:"string"` // The AWS KMS key identifier to use when restoring an encrypted DB cluster @@ -32078,6 +32201,9 @@ type RestoreDBClusterToPointInTimeInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // + // For more information, see IAM Database Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The AWS KMS key identifier to use when restoring an encrypted DB cluster @@ -32433,12 +32559,11 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. 
// - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. @@ -32874,6 +32999,11 @@ type RestoreDBInstanceFromS3Input struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. + // + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // A value that indicates whether to enable Performance Insights for the DB @@ -32897,7 +33027,7 @@ type RestoreDBInstanceFromS3Input struct { // The amount of Provisioned IOPS (input/output operations per second) to allocate // initially for the DB instance. For information about valid Iops values, see - // see Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) + // Amazon RDS Provisioned IOPS Storage to Improve Performance (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) // in the Amazon RDS User Guide. 
Iops *int64 `type:"integer"` @@ -33489,12 +33619,11 @@ type RestoreDBInstanceToPointInTimeInput struct { // A value that indicates whether to enable mapping of AWS Identity and Access // Management (IAM) accounts to database accounts. By default, mapping is disabled. + // For information about the supported DB engines, see CreateDBInstance. // - // You can enable IAM database authentication for the following database engines - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher + // For more information about IAM database authentication, see IAM Database + // Authentication for MySQL and PostgreSQL (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) + // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` // The database engine to use for the new instance. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go index 2201ab2c053..e3218e3c08b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go @@ -228,7 +228,7 @@ func (c *SageMaker) CreateCodeRepositoryRequest(input *CreateCodeRepositoryInput // more than one notebook instance, and it persists independently from the lifecycle // of any notebook instances it is associated with. // -// The repository can be hosted either in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) +// The repository can be hosted either in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -443,7 +443,7 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // model artifacts from the S3 path you provided. 
 AWS STS is activated in your // IAM user account by default. If you previously deactivated AWS STS for a // region, you need to reactivate AWS STS for that region. For more information, -// see Activating and Deactivating AWS STS i an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// see Activating and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -729,11 +729,11 @@ func (c *SageMaker) CreateLabelingJobRequest(input *CreateLabelingJobInput) (req // that need to be labeled by a human. Automated data labeling uses active learning // to determine if a data object can be labeled by machine or if it needs to // be sent to a human worker. For more information, see Using Automated Data -// Labeling (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html). +// Labeling (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html). // // The data objects to be labeled are contained in an Amazon S3 bucket. You // create a manifest file that describes the location of each object. For more -// information, see Using Input and Output Data (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html). +// information, see Using Input and Output Data (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-data.html). // // The output can be used as the manifest file for another labeling job or as // training data for your machine learning models. @@ -1036,7 +1036,8 @@ func (c *SageMaker) CreateNotebookInstanceRequest(input *CreateNotebookInstanceI // groups allow it. // // After creating the notebook instance, Amazon SageMaker returns its Amazon -// Resource Name (ARN). +// Resource Name (ARN). You can't change the name of a notebook instance after +// you create it. 
 // // After Amazon SageMaker creates the notebook instance, you can connect to // the Jupyter server and work in Jupyter notebooks. For example, you can write @@ -1328,23 +1329,29 @@ func (c *SageMaker) CreateTrainingJobRequest(input *CreateTrainingJobInput) (req // for each training algorithm provided by Amazon SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). // -// * InputDataConfig - Describes the training dataset and the Amazon S3 location -// where it is stored. +// * InputDataConfig - Describes the training dataset and the Amazon S3, +// EFS, or FSx location where it is stored. // -// * OutputDataConfig - Identifies the Amazon S3 location where you want -// Amazon SageMaker to save the results of model training. +// * OutputDataConfig - Identifies the Amazon S3 bucket where you want Amazon +// SageMaker to save the results of model training. // // * ResourceConfig - Identifies the resources, ML compute instances, and // ML storage volumes to deploy for model training. In distributed training, // you specify more than one instance. // +// * EnableManagedSpotTraining - Optimize the cost of training machine learning +// models by up to 80% by using Amazon EC2 Spot instances. For more information, +// see Managed Spot Training (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). +// // * RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes // to perform tasks on your behalf during model training. You must grant // this role the necessary permissions so that Amazon SageMaker can successfully // complete model training. // -// * StoppingCondition - Sets a time limit for training. Use this parameter -// to cap model training costs. +// * StoppingCondition - To help cap training costs, use MaxRuntimeInSeconds +// to set a time limit for training. Use MaxWaitTimeInSeconds to specify +// how long you are willing to wait for a managed spot training job to +// complete. 
// // For more information about Amazon SageMaker, see How It Works (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). // @@ -7598,7 +7605,10 @@ type AnnotationConsolidationConfig struct { // on the Jaccard index of the boxes. arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox // arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox // arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox - // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox // // * Image classification - Uses a variant of the Expectation Maximization // approach to estimate the true class of an image based on annotations from @@ -7606,6 +7616,10 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass // arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass 
arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass // // * Semantic segmentation - Treats each pixel in an image as a multi-class // classification and treats pixel annotations from workers as "votes" for @@ -7615,6 +7629,12 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation // // * Text classification - Uses a variant of the Expectation Maximization // approach to estimate the true class of text based on annotations from @@ -7622,8 +7642,34 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass // arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass - // - // For more information, see Annotation Consolidation (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). 
 + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass + // + // * Named entity recognition - Groups similar selections and calculates + // aggregate boundaries, resolving to most-assigned label. arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition + // + // * Named entity recognition - Groups similar selections and calculates + // aggregate boundaries, resolving to most-assigned label. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition + // + // For more information, see Annotation Consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). // // AnnotationConsolidationLambdaArn is a required field AnnotationConsolidationLambdaArn *string `type:"string" required:"true"` @@ -8005,6 +8051,57 @@ func (s *ChannelSpecification) SetSupportedInputModes(v []*string) *ChannelSpeci return s } +// Contains information about the output location for managed spot training +// checkpoint data. +type CheckpointConfig struct { + _ struct{} `type:"structure"` + + // (Optional) The local directory where checkpoints are written. The default + // directory is /opt/ml/checkpoints/. + LocalPath *string `type:"string"` + + // Identifies the S3 path where you want Amazon SageMaker to store checkpoints. + // For example, s3://bucket-name/key-name-prefix. + // + // S3Uri is a required field + S3Uri *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CheckpointConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckpointConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CheckpointConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CheckpointConfig"} + if s.S3Uri == nil { + invalidParams.Add(request.NewErrParamRequired("S3Uri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocalPath sets the LocalPath field's value. +func (s *CheckpointConfig) SetLocalPath(v string) *CheckpointConfig { + s.LocalPath = &v + return s +} + +// SetS3Uri sets the S3Uri field's value. +func (s *CheckpointConfig) SetS3Uri(v string) *CheckpointConfig { + s.S3Uri = &v + return s +} + // Specifies summary information about a Git repository. type CodeRepositorySummary struct { _ struct{} `type:"structure"` @@ -8260,7 +8357,7 @@ type ContainerDefinition struct { // When a ContainerDefinition is part of an inference pipeline, the value of // ths parameter uniquely identifies the container for the purposes of logging // and metrics. For information, see Use Logs and Metrics to Monitor an Inference - // Pipeline (http://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html). + // Pipeline (https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html). // If you don't specify a value for this parameter for a ContainerDefinition // that is part of an inference pipeline, a unique name is automatically assigned // based on the position of the ContainerDefinition in the pipeline. If you @@ -8286,14 +8383,14 @@ type ContainerDefinition struct { // are stored. This path must point to a single gzip compressed tar archive // (.tar.gz suffix). The S3 path is required for Amazon SageMaker built-in algorithms, // but not if you use your own algorithms. For more information on built-in - // algorithms, see Common Parameters (http://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). + // algorithms, see Common Parameters (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). 
 // // If you provide a value for this parameter, Amazon SageMaker uses AWS Security // Token Service to download model artifacts from the S3 path you provide. AWS // STS is activated in your IAM user account by default. If you previously deactivated // AWS STS for a region, you need to reactivate AWS STS for that region. For // more information, see Activating and Deactivating AWS STS in an AWS Region - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // If you use a built-in algorithm to create a model, Amazon SageMaker requires @@ -8381,7 +8478,7 @@ type ContinuousParameterRange struct { // The scale that hyperparameter tuning uses to search the hyperparameter range. // For information about choosing a hyperparameter scale, see Hyperparameter - // Scaling (http://docs.aws.amazon.com//sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). + // Scaling (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). // One of the following values: // // Auto @@ -9176,7 +9273,7 @@ type CreateHyperParameterTuningJobInput struct { // An array of key-value pairs. You can use tags to categorize your AWS resources // in different ways, for example, by purpose, owner, or environment. For more - // information, see AWS Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). + // information, see AWS Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). // // Tags that you specify for the tuning job are also added to all training jobs // that the tuning job launches. 
 @@ -9406,7 +9503,7 @@ type CreateLabelingJobInput struct { StoppingConditions *LabelingJobStoppingConditions `type:"structure"` // An array of key/value pairs. For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -9898,24 +9995,24 @@ type CreateNotebookInstanceInput struct { // A list of Elastic Inference (EI) instance types to associate with this notebook // instance. Currently, only one instance type can be associated with a notebook // instance. For more information, see Using Elastic Inference in Amazon SageMaker - // (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A Git repository to associate with the notebook instance as its default code // repository. 
 This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // Sets whether Amazon SageMaker provides internet access to the notebook instance. @@ -9938,7 +10035,7 @@ type CreateNotebookInstanceInput struct { // The Amazon Resource Name (ARN) of a AWS Key Management Service key that Amazon // SageMaker uses to encrypt data on the storage volume attached to your notebook // instance. The KMS key you provide must be enabled. For information, see Enabling - // and Disabling Keys (http://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) + // and Disabling Keys (https://docs.aws.amazon.com/kms/latest/developerguide/enabling-keys.html) // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` @@ -9956,7 +10053,7 @@ type CreateNotebookInstanceInput struct { // SageMaker assumes this role to perform tasks on your behalf. You must grant // this role necessary permissions so Amazon SageMaker can perform these tasks. // The policy must allow the Amazon SageMaker service principal (sagemaker.amazonaws.com) - // permissions to assume this role. For more information, see Amazon SageMaker + // permissions to assume this role. 
 For more information, see Amazon SageMaker // Roles (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). // // To be able to pass this role to Amazon SageMaker, the caller of this API @@ -10333,6 +10430,10 @@ type CreateTrainingJobInput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `type:"structure"` + // To encrypt all communications between ML compute instances in distributed // training, choose True. Encryption provides greater security for distributed // training, but training might take longer. How long it takes depends on the @@ -10342,6 +10443,18 @@ // Training Job (https://docs.aws.amazon.com/sagemaker/latest/dg/train-encrypt.html). EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // To train models using managed spot training, choose True. Managed spot training + // provides a fully managed and scalable infrastructure for training machine + // learning models. This option is useful when training jobs can be interrupted + // and when there is flexibility when the training job is run. + // + // The complete and intermediate results of jobs are stored in an Amazon S3 + // bucket, and can be used as a starting point to train models incrementally. + // Amazon SageMaker provides metrics and logs in CloudWatch. They can be used + // to see when managed spot training jobs are running, interrupted, resumed, + // or completed. + EnableManagedSpotTraining *bool `type:"boolean"` + // Isolates the training container. No inbound or outbound network calls can // be made, except for calls between peers within a training cluster for distributed // training. 
If you enable network isolation for training jobs that are configured @@ -10367,16 +10480,19 @@ type CreateTrainingJobInput struct { // // Algorithms can accept input data from one or more channels. For example, // an algorithm might have two channels of input data, training_data and validation_data. - // The configuration for each channel provides the S3 location where the input - // data is stored. It also provides information about the stored data: the MIME - // type, compression method, and whether the data is wrapped in RecordIO format. + // The configuration for each channel provides the S3, EFS, or FSx location + // where the input data is stored. It also provides information about the stored + // data: the MIME type, compression method, and whether the data is wrapped + // in RecordIO format. // // Depending on the input mode that the algorithm supports, Amazon SageMaker // either copies input data files from an S3 bucket to a local directory in - // the Docker container, or makes it available as input streams. + // the Docker container, or makes it available as input streams. For example, + // if you specify an EFS location, input data files will be made available as + // input streams. They do not need to be downloaded. InputDataConfig []*Channel `min:"1" type:"list"` - // Specifies the path to the S3 bucket where you want to store model artifacts. + // Specifies the path to the S3 location where you want to store model artifacts. // Amazon SageMaker creates subfolders for the artifacts. 
// // OutputDataConfig is a required field @@ -10484,6 +10600,11 @@ func (s *CreateTrainingJobInput) Validate() error { invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) } } + if s.CheckpointConfig != nil { + if err := s.CheckpointConfig.Validate(); err != nil { + invalidParams.AddNested("CheckpointConfig", err.(request.ErrInvalidParams)) + } + } if s.InputDataConfig != nil { for i, v := range s.InputDataConfig { if v == nil { @@ -10537,12 +10658,24 @@ func (s *CreateTrainingJobInput) SetAlgorithmSpecification(v *AlgorithmSpecifica return s } +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *CreateTrainingJobInput) SetCheckpointConfig(v *CheckpointConfig) *CreateTrainingJobInput { + s.CheckpointConfig = v + return s +} + // SetEnableInterContainerTrafficEncryption sets the EnableInterContainerTrafficEncryption field's value. func (s *CreateTrainingJobInput) SetEnableInterContainerTrafficEncryption(v bool) *CreateTrainingJobInput { s.EnableInterContainerTrafficEncryption = &v return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *CreateTrainingJobInput) SetEnableManagedSpotTraining(v bool) *CreateTrainingJobInput { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. func (s *CreateTrainingJobInput) SetEnableNetworkIsolation(v bool) *CreateTrainingJobInput { s.EnableNetworkIsolation = &v @@ -10645,8 +10778,13 @@ type CreateTransformJobInput struct { // limit, set BatchStrategy to MultiRecord and SplitType to Line. BatchStrategy *string `type:"string" enum:"BatchStrategy"` - // The data structure used for combining the input data and inference in the - // output file. For more information, see Batch Transform I/O Join (http://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-io-join.html). 
+ // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). DataProcessing *DataProcessing `type:"structure"` // The environment variables to set in the Docker container. We support up to @@ -10658,7 +10796,7 @@ type CreateTransformJobInput struct { // Amazon SageMaker checks the optional execution-parameters to determine the // optimal settings for your chosen algorithm. If the execution-parameters endpoint // is not enabled, the default value is 1. For more information on execution-parameters, - // see How Containers Serve Requests (http://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). + // see How Containers Serve Requests (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-batch-code.html#your-algorithms-batch-code-how-containe-serves-requests). // For built-in algorithms, you don't need to set a value for MaxConcurrentTransforms. MaxConcurrentTransforms *int64 `type:"integer"` @@ -10874,7 +11012,7 @@ type CreateWorkteamInput struct { // A list of MemberDefinition objects that contains objects that identify the // Amazon Cognito user pool that makes up the work team. For more information, - // see Amazon Cognito User Pools (http://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). 
+ // see Amazon Cognito User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). // // All of the CognitoMemberDefinition objects that make up the member definition // must have the same ClientId and UserPool values. @@ -10885,6 +11023,11 @@ type CreateWorkteamInput struct { // Configures notification of workers regarding available or expiring work items. NotificationConfiguration *NotificationConfiguration `type:"structure"` + // An array of key-value pairs. + // + // For more information, see Resource Tag (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) + // and Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` // The name of the work team. Use this name to identify the work team. @@ -11005,17 +11148,21 @@ func (s *CreateWorkteamOutput) SetWorkteamArn(v string) *CreateWorkteamOutput { return s } -// The data structure used to combine the input data and transformed data from -// the batch transform output into a joined dataset and to store it in an output -// file. It also contains information on how to filter the input data and the -// joined dataset. For more information, see Batch Transform I/O Join (http://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-io-join.html). +// The data structure used to specify the data to be used for inference in a +// batch transform job and to associate the data that is relevant to the prediction +// results in the output. The input filter provided allows you to exclude input +// data that is not needed for inference in a batch transform job. The output +// filter provided allows you to include input data relevant to interpreting +// the predictions in the output from the job. 
For more information, see Associate +// Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). type DataProcessing struct { _ struct{} `type:"structure"` - // A JSONPath expression used to select a portion of the input data to pass - // to the algorithm. Use the InputFilter parameter to exclude fields, such as - // an ID column, from the input. If you want Amazon SageMaker to pass the entire - // input dataset to the algorithm, accept the default value $. + // A JSONPath (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) + // expression used to select a portion of the input data to pass to the algorithm. + // Use the InputFilter parameter to exclude fields, such as an ID column, from + // the input. If you want Amazon SageMaker to pass the entire input dataset + // to the algorithm, accept the default value $. // // Examples: "$", "$[1:]", "$.features" InputFilter *string `type:"string"` @@ -11024,8 +11171,7 @@ type DataProcessing struct { // values are None and Input The default value is None which specifies not to // join the input with the transformed data. If you want the batch transform // job to join the original input data with the transformed data, set JoinSource - // to Input. To join input and output, the batch transform job must satisfy - // the Requirements for Using Batch Transform I/O Join (http://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-io-join.html#batch-transform-io-join-requirements). + // to Input. // // For JSON or JSONLines objects, such as a JSON array, Amazon SageMaker adds // the transformed data to the input JSON object in an attribute called SageMakerOutput. @@ -11040,13 +11186,14 @@ type DataProcessing struct { // is a CSV file. 
JoinSource *string `type:"string" enum:"JoinSource"` - // A JSONPath expression used to select a portion of the joined dataset to save - // in the output file for a batch transform job. If you want Amazon SageMaker - // to store the entire input dataset in the output file, leave the default value, - // $. If you specify indexes that aren't within the dimension size of the joined + // A JSONPath (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html#data-processing-operators) + // expression used to select a portion of the joined dataset to save in the + // output file for a batch transform job. If you want Amazon SageMaker to store + // the entire input dataset in the output file, leave the default value, $. + // If you specify indexes that aren't within the dimension size of the joined // dataset, you get an error. // - // Examples: "$", "$[0,5:]", "$.['id','SageMakerOutput']" + // Examples: "$", "$[0,5:]", "$['id','SageMakerOutput']" OutputFilter *string `type:"string"` } @@ -11082,6 +11229,9 @@ func (s *DataProcessing) SetOutputFilter(v string) *DataProcessing { type DataSource struct { _ struct{} `type:"structure"` + // The file system that is associated with a channel. + FileSystemDataSource *FileSystemDataSource `type:"structure"` + // The S3 location of the data source that is associated with a channel. S3DataSource *S3DataSource `type:"structure"` } @@ -11099,6 +11249,11 @@ func (s DataSource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *DataSource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DataSource"} + if s.FileSystemDataSource != nil { + if err := s.FileSystemDataSource.Validate(); err != nil { + invalidParams.AddNested("FileSystemDataSource", err.(request.ErrInvalidParams)) + } + } if s.S3DataSource != nil { if err := s.S3DataSource.Validate(); err != nil { invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) @@ -11111,6 +11266,12 @@ func (s *DataSource) Validate() error { return nil } +// SetFileSystemDataSource sets the FileSystemDataSource field's value. +func (s *DataSource) SetFileSystemDataSource(v *FileSystemDataSource) *DataSource { + s.FileSystemDataSource = v + return s +} + // SetS3DataSource sets the S3DataSource field's value. func (s *DataSource) SetS3DataSource(v *S3DataSource) *DataSource { s.S3DataSource = v @@ -11686,7 +11847,7 @@ func (s *DeleteWorkteamOutput) SetSuccess(v bool) *DeleteWorkteamOutput { // of the primary container when you created the model hosted in this ProductionVariant, // the path resolves to a path of the form registry/repository[@digest]. A digest // is a hash value that identifies a specific version of an image. For information -// about Amazon ECR paths, see Pulling an Image (http://docs.aws.amazon.com//AmazonECR/latest/userguide/docker-pull-ecr-image.html) +// about Amazon ECR paths, see Pulling an Image (https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-pull-ecr-image.html) // in the Amazon ECR User Guide. type DeployedImage struct { _ struct{} `type:"structure"` @@ -12855,7 +13016,7 @@ type DescribeLabelingJobOutput struct { StoppingConditions *LabelingJobStoppingConditions `type:"structure"` // An array of key/value pairs. 
For more information, see Using Cost Allocation - // Tags (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-what) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -13425,16 +13586,16 @@ type DescribeNotebookInstanceOutput struct { // A list of the Elastic Inference (EI) instance types associated with this // notebook instance. Currently only one EI instance type can be associated // with a notebook instance. For more information, see Using Elastic Inference - // in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // in Amazon SageMaker (sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A timestamp. Use this parameter to return the time when the notebook instance @@ -13443,10 +13604,10 @@ type DescribeNotebookInstanceOutput struct { // The Git repository associated with the notebook instance as its default code // repository. 
This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // Describes whether Amazon SageMaker provides internet access to the notebook @@ -13758,6 +13919,18 @@ type DescribeTrainingJobOutput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + // The billable time in seconds. + // + // You can calculate the savings from using managed spot training using the + // formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, + // if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings + // is 80%. + BillableTimeInSeconds *int64 `min:"1" type:"integer"` + + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `type:"structure"` + // A timestamp that indicates when the training job was created. // // CreationTime is a required field @@ -13770,6 +13943,10 @@ type DescribeTrainingJobOutput struct { // a deep learning algorithms in distributed training. 
EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // A Boolean indicating whether managed spot training is enabled (True) or not + // (False). + EnableManagedSpotTraining *bool `type:"boolean"` + // If you want to allow inbound or outbound network calls, except for calls // between peers within a training cluster for distributed training, choose // True. If you enable network isolation for training jobs that are configured @@ -13854,6 +14031,12 @@ type DescribeTrainingJobOutput struct { // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum // allowed runtime. // + // * MaxWaitTmeExceeded - The job stopped because it exceeded the maximum + // allowed wait time. + // + // * Interrupted - The job stopped because the managed spot training instances + // were interrupted. + // // * Stopped - The training job has stopped. // // Stopping @@ -13877,9 +14060,10 @@ type DescribeTrainingJobOutput struct { // through. SecondaryStatusTransitions []*SecondaryStatusTransition `type:"list"` - // Specifies a limit to how long a model training job can run. When the job - // reaches the time limit, Amazon SageMaker ends the training job. Use this - // API to cap model training costs. + // Specifies a limit to how long a model training job can run. It also specifies + // the maximum time to wait for a spot instance. When the job reaches the time + // limit, Amazon SageMaker ends the training job. Use this API to cap model + // training costs. // // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which // delays job termination for 120 seconds. Algorithms can use this 120-second @@ -13933,6 +14117,9 @@ type DescribeTrainingJobOutput struct { // of the training container. TrainingStartTime *time.Time `type:"timestamp"` + // The training time in seconds. 
+ TrainingTimeInSeconds *int64 `min:"1" type:"integer"` + // The Amazon Resource Name (ARN) of the associated hyperparameter tuning job // if the training job was launched by a hyperparameter tuning job. TuningJobArn *string `type:"string"` @@ -13959,6 +14146,18 @@ func (s *DescribeTrainingJobOutput) SetAlgorithmSpecification(v *AlgorithmSpecif return s } +// SetBillableTimeInSeconds sets the BillableTimeInSeconds field's value. +func (s *DescribeTrainingJobOutput) SetBillableTimeInSeconds(v int64) *DescribeTrainingJobOutput { + s.BillableTimeInSeconds = &v + return s +} + +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *DescribeTrainingJobOutput) SetCheckpointConfig(v *CheckpointConfig) *DescribeTrainingJobOutput { + s.CheckpointConfig = v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *DescribeTrainingJobOutput) SetCreationTime(v time.Time) *DescribeTrainingJobOutput { s.CreationTime = &v @@ -13971,6 +14170,12 @@ func (s *DescribeTrainingJobOutput) SetEnableInterContainerTrafficEncryption(v b return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *DescribeTrainingJobOutput) SetEnableManagedSpotTraining(v bool) *DescribeTrainingJobOutput { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. func (s *DescribeTrainingJobOutput) SetEnableNetworkIsolation(v bool) *DescribeTrainingJobOutput { s.EnableNetworkIsolation = &v @@ -14085,6 +14290,12 @@ func (s *DescribeTrainingJobOutput) SetTrainingStartTime(v time.Time) *DescribeT return s } +// SetTrainingTimeInSeconds sets the TrainingTimeInSeconds field's value. +func (s *DescribeTrainingJobOutput) SetTrainingTimeInSeconds(v int64) *DescribeTrainingJobOutput { + s.TrainingTimeInSeconds = &v + return s +} + // SetTuningJobArn sets the TuningJobArn field's value. 
func (s *DescribeTrainingJobOutput) SetTuningJobArn(v string) *DescribeTrainingJobOutput { s.TuningJobArn = &v @@ -14154,10 +14365,13 @@ type DescribeTransformJobOutput struct { // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` - // The data structure used to combine the input data and transformed data from - // the batch transform output into a joined dataset and to store it in an output - // file. It also contains information on how to filter the input data and the - // joined dataset. For more information, see Batch Transform I/O Join (http://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-io-join.html). + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). DataProcessing *DataProcessing `type:"structure"` // The environment variables to set in the Docker container. We support up to @@ -14167,7 +14381,7 @@ type DescribeTransformJobOutput struct { // If the transform job failed, FailureReason describes why it failed. A transform // job creates a log file, which includes error messages, and stores it as an // Amazon S3 object. For more information, see Log Amazon SageMaker Events with - // Amazon CloudWatch (http://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). + // Amazon CloudWatch (https://docs.aws.amazon.com/sagemaker/latest/dg/logging-cloudwatch.html). 
FailureReason *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon SageMaker Ground Truth labeling @@ -14613,6 +14827,91 @@ func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { return s } +// Specifies a file system data source for a channel. +type FileSystemDataSource struct { + _ struct{} `type:"structure"` + + // The full path to the directory to associate with the channel. + // + // DirectoryPath is a required field + DirectoryPath *string `type:"string" required:"true"` + + // The access mode of the mount of the directory associated with the channel. + // A directory can be mounted either in ro (read-only) or rw (read-write). + // + // FileSystemAccessMode is a required field + FileSystemAccessMode *string `type:"string" required:"true" enum:"FileSystemAccessMode"` + + // The file system id. + // + // FileSystemId is a required field + FileSystemId *string `min:"11" type:"string" required:"true"` + + // The file system type. + // + // FileSystemType is a required field + FileSystemType *string `type:"string" required:"true" enum:"FileSystemType"` +} + +// String returns the string representation +func (s FileSystemDataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemDataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FileSystemDataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FileSystemDataSource"} + if s.DirectoryPath == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryPath")) + } + if s.FileSystemAccessMode == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemAccessMode")) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) + } + if s.FileSystemType == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDirectoryPath sets the DirectoryPath field's value. +func (s *FileSystemDataSource) SetDirectoryPath(v string) *FileSystemDataSource { + s.DirectoryPath = &v + return s +} + +// SetFileSystemAccessMode sets the FileSystemAccessMode field's value. +func (s *FileSystemDataSource) SetFileSystemAccessMode(v string) *FileSystemDataSource { + s.FileSystemAccessMode = &v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *FileSystemDataSource) SetFileSystemId(v string) *FileSystemDataSource { + s.FileSystemId = &v + return s +} + +// SetFileSystemType sets the FileSystemType field's value. +func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource { + s.FileSystemType = &v + return s +} + // A conditional statement for a search expression that includes a Boolean operator, // a resource property, and a value. 
// @@ -15037,6 +15336,8 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition + // // US East (Ohio) (us-east-2): // // * arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox @@ -15047,6 +15348,8 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition + // // US West (Oregon) (us-west-2): // // * arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox @@ -15057,6 +15360,20 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition + // + // Canada (Central) (ca-central-1): + // + // * arn:awslambda:ca-central-1:918755190332:function:PRE-BoundingBox + // + // * arn:awslambda:ca-central-1:918755190332:function:PRE-ImageMultiClass + // + // * arn:awslambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation + // + // * arn:awslambda:ca-central-1:918755190332:function:PRE-TextMultiClass + // + // * arn:awslambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition + // // EU (Ireland) (eu-west-1): // // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox @@ -15067,6 +15384,32 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition + // + // EU (London) (eu-west-2): + // + // * arn:awslambda:eu-west-2:487402164563:function:PRE-BoundingBox + // + // * arn:awslambda:eu-west-2:487402164563:function:PRE-ImageMultiClass + // + // * arn:awslambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation + // + // * arn:awslambda:eu-west-2:487402164563:function:PRE-TextMultiClass + // + // * 
arn:awslambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition + // + // EU Frankfurt (eu-central-1): + // + // * arn:awslambda:eu-central-1:203001061592:function:PRE-BoundingBox + // + // * arn:awslambda:eu-central-1:203001061592:function:PRE-ImageMultiClass + // + // * arn:awslambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation + // + // * arn:awslambda:eu-central-1:203001061592:function:PRE-TextMultiClass + // + // * arn:awslambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition + // // Asia Pacific (Tokyo) (ap-northeast-1): // // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox @@ -15077,7 +15420,45 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass // - // Asia Pacific (Sydney) (ap-southeast-1): + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Seoul) (ap-northeast-2): + // + // * arn:awslambda:ap-northeast-2:845288260483:function:PRE-BoundingBox + // + // * arn:awslambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass + // + // * arn:awslambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation + // + // * arn:awslambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass + // + // * arn:awslambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Mumbai) (ap-south-1): + // + // * arn:awslambda:ap-south-1:565803892007:function:PRE-BoundingBox + // + // * arn:awslambda:ap-south-1:565803892007:function:PRE-ImageMultiClass + // + // * arn:awslambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation + // + // * arn:awslambda:ap-south-1:565803892007:function:PRE-TextMultiClass + // + // * arn:awslambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Singapore) (ap-southeast-1): + // + // * arn:awslambda:ap-southeast-1:377565633583:function:PRE-BoundingBox + // + // * 
arn:awslambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass + // + // * arn:awslambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation + // + // * arn:awslambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass + // + // * arn:awslambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition + // + // Asia Pacific (Sydney) (ap-southeast-2): // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox // @@ -15087,13 +15468,18 @@ type HumanTaskConfig struct { // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition + // // PreHumanTaskLambdaArn is a required field PreHumanTaskLambdaArn *string `type:"string" required:"true"` - // The price that you pay for each task performed by a public worker. + // The price that you pay for each task performed by an Amazon Mechanical Turk + // worker. PublicWorkforceTaskPrice *PublicWorkforceTaskPrice `type:"structure"` - // The length of time that a task remains available for labelling by human workers. + // The length of time that a task remains available for labeling by human workers. + // If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours + // (43200). For private and vendor workforces, the maximum is as listed. TaskAvailabilityLifetimeInSeconds *int64 `min:"1" type:"integer"` // A description of the task for your human workers. @@ -15108,7 +15494,7 @@ type HumanTaskConfig struct { // The amount of time that a worker has to complete a task. // // TaskTimeLimitInSeconds is a required field - TaskTimeLimitInSeconds *int64 `min:"1" type:"integer" required:"true"` + TaskTimeLimitInSeconds *int64 `min:"30" type:"integer" required:"true"` // A title for the task for your human workers. 
// @@ -15171,8 +15557,8 @@ func (s *HumanTaskConfig) Validate() error { if s.TaskTimeLimitInSeconds == nil { invalidParams.Add(request.NewErrParamRequired("TaskTimeLimitInSeconds")) } - if s.TaskTimeLimitInSeconds != nil && *s.TaskTimeLimitInSeconds < 1 { - invalidParams.Add(request.NewErrParamMinValue("TaskTimeLimitInSeconds", 1)) + if s.TaskTimeLimitInSeconds != nil && *s.TaskTimeLimitInSeconds < 30 { + invalidParams.Add(request.NewErrParamMinValue("TaskTimeLimitInSeconds", 30)) } if s.TaskTitle == nil { invalidParams.Add(request.NewErrParamRequired("TaskTitle")) @@ -15489,6 +15875,10 @@ type HyperParameterTrainingJobDefinition struct { // AlgorithmSpecification is a required field AlgorithmSpecification *HyperParameterAlgorithmSpecification `type:"structure" required:"true"` + // Contains information about the output location for managed spot training + // checkpoint data. + CheckpointConfig *CheckpointConfig `type:"structure"` + // To encrypt all communications between ML compute instances in distributed // training, choose True. Encryption provides greater security for distributed // training, but training might take longer. How long it takes depends on the @@ -15496,6 +15886,10 @@ type HyperParameterTrainingJobDefinition struct { // a deep learning algorithm in distributed training. EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // A Boolean indicating whether managed spot training is enabled (True) or not + // (False). + EnableManagedSpotTraining *bool `type:"boolean"` + // Isolates the training container. No inbound or outbound network calls can // be made, except for calls between peers within a training cluster for distributed // training. If network isolation is used for training jobs that are configured @@ -15539,8 +15933,9 @@ type HyperParameterTrainingJobDefinition struct { StaticHyperParameters map[string]*string `type:"map"` // Specifies a limit to how long a model hyperparameter training job can run. 
- // When the job reaches the time limit, Amazon SageMaker ends the training job. - // Use this API to cap model training costs. + // It also specifies how long you are willing to wait for a managed spot training + // job to complete. When the job reaches the a limit, Amazon SageMaker ends + // the training job. Use this API to cap model training costs. // // StoppingCondition is a required field StoppingCondition *StoppingCondition `type:"structure" required:"true"` @@ -15591,6 +15986,11 @@ func (s *HyperParameterTrainingJobDefinition) Validate() error { invalidParams.AddNested("AlgorithmSpecification", err.(request.ErrInvalidParams)) } } + if s.CheckpointConfig != nil { + if err := s.CheckpointConfig.Validate(); err != nil { + invalidParams.AddNested("CheckpointConfig", err.(request.ErrInvalidParams)) + } + } if s.InputDataConfig != nil { for i, v := range s.InputDataConfig { if v == nil { @@ -15634,12 +16034,24 @@ func (s *HyperParameterTrainingJobDefinition) SetAlgorithmSpecification(v *Hyper return s } +// SetCheckpointConfig sets the CheckpointConfig field's value. +func (s *HyperParameterTrainingJobDefinition) SetCheckpointConfig(v *CheckpointConfig) *HyperParameterTrainingJobDefinition { + s.CheckpointConfig = v + return s +} + // SetEnableInterContainerTrafficEncryption sets the EnableInterContainerTrafficEncryption field's value. func (s *HyperParameterTrainingJobDefinition) SetEnableInterContainerTrafficEncryption(v bool) *HyperParameterTrainingJobDefinition { s.EnableInterContainerTrafficEncryption = &v return s } +// SetEnableManagedSpotTraining sets the EnableManagedSpotTraining field's value. +func (s *HyperParameterTrainingJobDefinition) SetEnableManagedSpotTraining(v bool) *HyperParameterTrainingJobDefinition { + s.EnableManagedSpotTraining = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. 
func (s *HyperParameterTrainingJobDefinition) SetEnableNetworkIsolation(v bool) *HyperParameterTrainingJobDefinition { s.EnableNetworkIsolation = &v @@ -15851,7 +16263,7 @@ type HyperParameterTuningJobConfig struct { // values to use for the training job it launches. To use the Bayesian search // stategy, set this to Bayesian. To randomly search, set it to Random. For // information about search strategies, see How Hyperparameter Tuning Works - // (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). // // Strategy is a required field Strategy *string `type:"string" required:"true" enum:"HyperParameterTuningJobStrategyType"` @@ -15869,7 +16281,7 @@ type HyperParameterTuningJobConfig struct { // // Amazon SageMaker stops training jobs launched by the hyperparameter tuning // job when they are unlikely to perform better than previously completed training - // jobs. For more information, see Stop Training Jobs Early (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html). + // jobs. For more information, see Stop Training Jobs Early (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html). TrainingJobEarlyStoppingType *string `type:"string" enum:"TrainingJobEarlyStoppingType"` } @@ -16147,7 +16559,7 @@ type HyperParameterTuningJobWarmStartConfig struct { // An array of hyperparameter tuning jobs that are used as the starting point // for the new hyperparameter tuning job. For more information about warm starting // a hyperparameter tuning job, see Using a Previous Hyperparameter Tuning Job - // as a Starting Point (http://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-warm-start.html). + // as a Starting Point (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-warm-start.html). 
// // Hyperparameter tuning jobs created before October 1, 2018 cannot be used // as parent jobs for warm start tuning jobs. @@ -16477,7 +16889,7 @@ type IntegerParameterRange struct { // The scale that hyperparameter tuning uses to search the hyperparameter range. // For information about choosing a hyperparameter scale, see Hyperparameter - // Scaling (http://docs.aws.amazon.com//sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). + // Scaling (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-ranges.html#scaling-type). // One of the following values: // // Auto @@ -16727,6 +17139,8 @@ type LabelingJobAlgorithmsConfig struct { // // * Object detection arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection // + // * Semantic Segmentation arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation + // // LabelingJobAlgorithmSpecificationArn is a required field LabelingJobAlgorithmSpecificationArn *string `type:"string" required:"true"` @@ -17080,8 +17494,14 @@ func (s *LabelingJobOutputConfig) SetS3OutputPath(v string) *LabelingJobOutputCo type LabelingJobResourceConfig struct { _ struct{} `type:"structure"` - // The AWS Key Management Service key ID for the key used to encrypt the output - // data, if any. + // The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to + // encrypt data on the storage volume attached to the ML compute instance(s) + // that run the training job. 
The VolumeKmsKeyId can be any of the following + // formats: + // + // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" + // + // * // Amazon Resource Name (ARN) of a KMS Key "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string `type:"string"` } @@ -17198,7 +17618,7 @@ type LabelingJobSummary struct { // The Amazon Resource Name (ARN) of the Lambda function used to consolidate // the annotations from individual workers into a label for a data object. For - // more information, see Annotation Consolidation (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). + // more information, see Annotation Consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). AnnotationConsolidationLambdaArn *string `type:"string"` // The date and time that the job was created (timestamp). @@ -19980,10 +20400,10 @@ func (s *MetricData) SetValue(v float64) *MetricData { return s } -// Specifies a metric that the training algorithm writes to stderr or stdout. -// Amazon SageMakerhyperparameter tuning captures all defined metrics. You specify -// one metric that a hyperparameter tuning job uses as its objective metric -// to choose the best training job. +// Specifies a metric that the training algorithm writes to stderr or stdout +// . Amazon SageMakerhyperparameter tuning captures all defined metrics. You +// specify one metric that a hyperparameter tuning job uses as its objective +// metric to choose the best training job. type MetricDefinition struct { _ struct{} `type:"structure"` @@ -20518,7 +20938,7 @@ type NestedFilters struct { Filters []*Filter `min:"1" type:"list" required:"true"` // The name of the property to use in the nested filters. The value must match - // a listed property name, such as InputDataConfig. + // a listed property name, such as InputDataConfig . 
// // NestedPropertyName is a required field NestedPropertyName *string `min:"1" type:"string" required:"true"` @@ -20692,11 +21112,11 @@ type NotebookInstanceSummary struct { // An array of up to three Git repositories associated with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // A timestamp that shows when the notebook instance was created. @@ -20704,10 +21124,10 @@ type NotebookInstanceSummary struct { // The Git repository associated with the notebook instance as its default code // repository. This can be either the name of a Git repository stored as a resource - // in your account, or the URL of a Git repository in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // in your account, or the URL of a Git repository in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). 
+ // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // The type of ML compute instance that the notebook instance is running on. @@ -20970,7 +21390,7 @@ type OutputDataConfig struct { // // The KMS key policy must grant permission to the IAM role that you specify // in your CreateTrainingJob, CreateTransformJob, or CreateHyperParameterTuningJob - // requests. For more information, see Using Key Policies in AWS KMS (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) + // requests. For more information, see Using Key Policies in AWS KMS (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) // in the AWS Key Management Service Developer Guide. KmsKeyId *string `type:"string"` @@ -21228,8 +21648,7 @@ type ProductionVariant struct { // The size of the Elastic Inference (EI) instance to use for the production // variant. EI instances provide on-demand GPU computing for inference. For - // more information, see Using Elastic Inference in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). - // For more information, see Using Elastic Inference in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // more information, see Using Elastic Inference in Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). AcceleratorType *string `type:"string" enum:"ProductionVariantAcceleratorType"` // Number of instances to launch initially. @@ -21479,7 +21898,8 @@ func (s *PropertyNameSuggestion) SetPropertyName(v string) *PropertyNameSuggesti // each task performed. // // Use one of the following prices for bounding box tasks. Prices are in US -// dollars. +// dollars and should be based on the complexity of the task; the longer it +// takes in your initial testing, the more you should offer. 
// // * 0.036 // @@ -21557,7 +21977,8 @@ func (s *PropertyNameSuggestion) SetPropertyName(v string) *PropertyNameSuggesti type PublicWorkforceTaskPrice struct { _ struct{} `type:"structure"` - // Defines the amount of money paid to a worker in United States dollars. + // Defines the amount of money paid to an Amazon Mechanical Turk worker in United + // States dollars. AmountInUsd *USD `type:"structure"` } @@ -23017,8 +23438,9 @@ func (s StopTransformJobOutput) GoString() string { } // Specifies a limit to how long a model training or compilation job can run. -// When the job reaches the time limit, Amazon SageMaker ends the training or -// compilation job. Use this API to cap model training costs. +// It also specifies how long you are willing to wait for a managed spot training +// job to complete. When the job reaches the time limit, Amazon SageMaker ends +// the training or compilation job. Use this API to cap model training costs. // // To stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal, which // delays job termination for 120 seconds. Algorithms can use this 120-second @@ -23042,6 +23464,12 @@ type StoppingCondition struct { // ends the job. If value is not specified, default value is 1 day. The maximum // value is 28 days. MaxRuntimeInSeconds *int64 `min:"1" type:"integer"` + + // The maximum length of time, in seconds, how long you are willing to wait + // for a managed spot training job to complete. It is the amount of time spent + // waiting for Spot capacity plus the amount of time the training job runs. + // It must be equal to or greater than MaxRuntimeInSeconds. 
+ MaxWaitTimeInSeconds *int64 `min:"1" type:"integer"` } // String returns the string representation @@ -23060,6 +23488,9 @@ func (s *StoppingCondition) Validate() error { if s.MaxRuntimeInSeconds != nil && *s.MaxRuntimeInSeconds < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxRuntimeInSeconds", 1)) } + if s.MaxWaitTimeInSeconds != nil && *s.MaxWaitTimeInSeconds < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxWaitTimeInSeconds", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -23073,6 +23504,12 @@ func (s *StoppingCondition) SetMaxRuntimeInSeconds(v int64) *StoppingCondition { return s } +// SetMaxWaitTimeInSeconds sets the MaxWaitTimeInSeconds field's value. +func (s *StoppingCondition) SetMaxWaitTimeInSeconds(v int64) *StoppingCondition { + s.MaxWaitTimeInSeconds = &v + return s +} + // Describes a work team of a vendor that does the a labelling job. type SubscribedWorkteam struct { _ struct{} `type:"structure"` @@ -24498,7 +24935,7 @@ type TransformResources struct { // The ML compute instance type for the transform job. If you are using built-in // algorithms to transform moderately sized datasets, we recommend using ml.m4.xlarge - // or ml.m5.largeinstance types. + // or ml.m5.large instance types. // // InstanceType is a required field InstanceType *string `type:"string" required:"true" enum:"TransformInstanceType"` @@ -24684,7 +25121,7 @@ type UiConfig struct { // The Amazon S3 bucket location of the UI template. For more information about // the contents of a UI template, see Creating Your Custom Labeling Task Template - // (http://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). 
// // UiTemplateS3Uri is a required field UiTemplateS3Uri *string `type:"string" required:"true"` @@ -25019,25 +25456,25 @@ type UpdateNotebookInstanceInput struct { // A list of the Elastic Inference (EI) instance types to associate with this // notebook instance. Currently only one EI instance type can be associated // with a notebook instance. For more information, see Using Elastic Inference - // in Amazon SageMaker (http://docs.aws.amazon.com/sagemaker/latest/dg/ei.html). + // in Amazon SageMaker (sagemaker/latest/dg/ei.html). AcceleratorTypes []*string `type:"list"` // An array of up to three Git repositories to associate with the notebook instance. // These can be either the names of Git repositories stored as resources in - // your account, or the URL of Git repositories in AWS CodeCommit (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) + // your account, or the URL of Git repositories in AWS CodeCommit (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) // or in any other Git repository. These repositories are cloned at the same // level as the default repository of your notebook instance. For more information, // see Associating Git Repositories with Amazon SageMaker Notebook Instances - // (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). AdditionalCodeRepositories []*string `type:"list"` // The Git repository to associate with the notebook instance as its default // code repository. This can be either the name of a Git repository stored as // a resource in your account, or the URL of a Git repository in AWS CodeCommit - // (http://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or + // (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html) or // in any other Git repository. When you open a notebook instance, it opens // in the directory that contains this repository. 
For more information, see - // Associating Git Repositories with Amazon SageMaker Notebook Instances (http://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). + // Associating Git Repositories with Amazon SageMaker Notebook Instances (https://docs.aws.amazon.com/sagemaker/latest/dg/nbi-git-repo.html). DefaultCodeRepository *string `min:"1" type:"string"` // A list of the Elastic Inference (EI) instance types to remove from this notebook @@ -25220,11 +25657,13 @@ type UpdateNotebookInstanceLifecycleConfigInput struct { // NotebookInstanceLifecycleConfigName is a required field NotebookInstanceLifecycleConfigName *string `type:"string" required:"true"` - // The shell script that runs only once, when you create a notebook instance + // The shell script that runs only once, when you create a notebook instance. + // The shell script must be a base64-encoded string. OnCreate []*NotebookInstanceLifecycleHook `type:"list"` // The shell script that runs every time you start a notebook instance, including - // when you create the notebook instance. + // when you create the notebook instance. The shell script must be a base64-encoded + // string. 
OnStart []*NotebookInstanceLifecycleHook `type:"list"` } @@ -25787,6 +26226,22 @@ const ( EndpointStatusFailed = "Failed" ) +const ( + // FileSystemAccessModeRw is a FileSystemAccessMode enum value + FileSystemAccessModeRw = "rw" + + // FileSystemAccessModeRo is a FileSystemAccessMode enum value + FileSystemAccessModeRo = "ro" +) + +const ( + // FileSystemTypeEfs is a FileSystemType enum value + FileSystemTypeEfs = "EFS" + + // FileSystemTypeFsxLustre is a FileSystemType enum value + FileSystemTypeFsxLustre = "FSxLustre" +) + const ( // FrameworkTensorflow is a Framework enum value FrameworkTensorflow = "TENSORFLOW" @@ -26391,6 +26846,12 @@ const ( // SecondaryStatusFailed is a SecondaryStatus enum value SecondaryStatusFailed = "Failed" + + // SecondaryStatusInterrupted is a SecondaryStatus enum value + SecondaryStatusInterrupted = "Interrupted" + + // SecondaryStatusMaxWaitTimeExceeded is a SecondaryStatus enum value + SecondaryStatusMaxWaitTimeExceeded = "MaxWaitTimeExceeded" ) const ( @@ -26469,8 +26930,17 @@ const ( // TargetDeviceRk3288 is a TargetDevice enum value TargetDeviceRk3288 = "rk3288" + // TargetDeviceAisage is a TargetDevice enum value + TargetDeviceAisage = "aisage" + // TargetDeviceSbeC is a TargetDevice enum value TargetDeviceSbeC = "sbe_c" + + // TargetDeviceQcs605 is a TargetDevice enum value + TargetDeviceQcs605 = "qcs605" + + // TargetDeviceQcs603 is a TargetDevice enum value + TargetDeviceQcs603 = "qcs603" ) const ( @@ -26559,6 +27029,9 @@ const ( // TrainingInstanceTypeMlC518xlarge is a TrainingInstanceType enum value TrainingInstanceTypeMlC518xlarge = "ml.c5.18xlarge" + + // TrainingInstanceTypeMlP3dn24xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlP3dn24xlarge = "ml.p3dn.24xlarge" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index 8d71f726548..823802f3b01 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -1884,9 +1884,6 @@ func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, outpu // * A new tag with a key identical to that of an existing tag overwrites // the existing tag. // -// * Tagging actions are limited to 5 TPS per AWS account. If your application -// requires a higher throughput, file a technical support request (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). -// // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) // in the Amazon Simple Queue Service Developer Guide. // @@ -2570,6 +2567,33 @@ type CreateQueueInput struct { // // QueueName is a required field QueueName *string `type:"string" required:"true"` + + // Add cost allocation tags to the specified Amazon SQS queue. For an overview, + // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) + // in the Amazon Simple Queue Service Developer Guide. + // + // When you use queue tags, keep the following guidelines in mind: + // + // * Adding more than 50 tags to a queue isn't recommended. + // + // * Tags don't have any semantic meaning. Amazon SQS interprets tags as + // character strings. + // + // * Tags are case-sensitive. + // + // * A new tag with a key identical to that of an existing tag overwrites + // the existing tag. + // + // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) + // in the Amazon Simple Queue Service Developer Guide. + // + // To be able to tag a queue on creation, you must have the sqs:CreateQueue + // and sqs:TagQueue permissions. + // + // Cross-account permissions don't apply to this action. 
For more information, + // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) + // in the Amazon Simple Queue Service Developer Guide. + Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"` } // String returns the string representation @@ -2607,6 +2631,12 @@ func (s *CreateQueueInput) SetQueueName(v string) *CreateQueueInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput { + s.Tags = v + return s +} + // Returns the QueueUrl attribute of the created queue. type CreateQueueOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go index 1eb9b6021c7..59e11cfcfb6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go @@ -5348,12 +5348,12 @@ func (c *StorageGateway) NotifyWhenUploadedRequest(input *NotifyWhenUploadedInpu // NotifyWhenUploaded API operation for AWS Storage Gateway. // // Sends you notification through CloudWatch Events when all files written to -// your NFS file share have been uploaded to Amazon S3. +// your file share have been uploaded to Amazon S3. // // AWS Storage Gateway can send a notification through Amazon CloudWatch Events // when all files written to your file share up to that point in time have been -// uploaded to Amazon S3. These files include files written to the NFS file -// share up to the time that you make a request for notification. When the upload +// uploaded to Amazon S3. These files include files written to the file share +// up to the time that you make a request for notification. 
When the upload // is done, Storage Gateway sends you notification through an Amazon CloudWatch // Event. You can configure CloudWatch Events to send the notification through // event targets such as Amazon SNS or AWS Lambda function. This operation is @@ -6935,6 +6935,10 @@ func (c *StorageGateway) UpdateSMBSecurityStrategyRequest(input *UpdateSMBSecuri // Updates the SMB security strategy on a file gateway. This action is only // supported in file gateways. // +// This API is called Security level in the User Guide. +// +// A higher security level can affect performance of the gateway. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8849,9 +8853,12 @@ func (s *CreateNFSFileShareOutput) SetFileShareARN(v string) *CreateNFSFileShare type CreateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users or groups in the Active Directory that have administrator - // rights to the file share. A group must be prefixed with the @ character. - // For example @group1. Can only be set if Authentication is set to ActiveDirectory. + // A list of users in the Active Directory that will be granted administrator + // privileges on the file share. These users can do all file operations as the + // super-user. + // + // Use this option very carefully, because any user in this list can do anything + // they like on the file share, regardless of file permissions. AdminUserList []*string `type:"list"` // The authentication method that users use to access the file share. @@ -9145,6 +9152,15 @@ type CreateSnapshotFromVolumeRecoveryPointInput struct { // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` + // A list of up to 50 tags that can be assigned to a snapshot. Each tag is a + // key-value pair. 
+ // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. The + // maximum length of a tag's key is 128 characters, and the maximum length for + // a tag's value is 256. + Tags []*Tag `type:"list"` + // The Amazon Resource Name (ARN) of the iSCSI volume target. Use the DescribeStorediSCSIVolumes // operation to return to retrieve the TargetARN for specified VolumeARN. // @@ -9177,6 +9193,16 @@ func (s *CreateSnapshotFromVolumeRecoveryPointInput) Validate() error { if s.VolumeARN != nil && len(*s.VolumeARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("VolumeARN", 50)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9190,6 +9216,12 @@ func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetSnapshotDescription(v st return s } +// SetTags sets the Tags field's value. +func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetTags(v []*Tag) *CreateSnapshotFromVolumeRecoveryPointInput { + s.Tags = v + return s +} + // SetVolumeARN sets the VolumeARN field's value. func (s *CreateSnapshotFromVolumeRecoveryPointInput) SetVolumeARN(v string) *CreateSnapshotFromVolumeRecoveryPointInput { s.VolumeARN = &v @@ -11472,14 +11504,18 @@ type DescribeSMBSettingsOutput struct { // The type of security strategy that was specified for file gateway. // - // ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, - // SMB encryption is offered but not required. + // ClientSpecified: if you use this option, requests are established based on + // what is negotiated by the client. This option is recommended when you want + // to maximize compatibility across different clients in your environment. 
// - // MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption - // is offered but not required. + // MandatorySigning: if you use this option, file gateway only allows connections + // from SMBv2 or SMBv3 clients that have signing enabled. This option works + // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. // - // MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, - // SMB encryption is required. + // MandatoryEncryption: if you use this option, file gateway only allows connections + // from SMBv3 clients that have encryption enabled. This option is highly recommended + // for environments that handle sensitive data. This option works with SMB clients + // on Microsoft Windows 8, Windows Server 2012 or newer. SMBSecurityStrategy *string `type:"string" enum:"SMBSecurityStrategy"` } @@ -11575,6 +11611,11 @@ type DescribeSnapshotScheduleOutput struct { // of the gateway. StartAt *int64 `type:"integer"` + // A list of up to 50 tags assigned to the snapshot schedule, sorted alphabetically + // by key name. Each tag is a key-value pair. For a gateway with more than 10 + // tags assigned, you can view all tags using the ListTagsForResource API operation. + Tags []*Tag `type:"list"` + // A value that indicates the time zone of the gateway. Timezone *string `min:"3" type:"string"` @@ -11610,6 +11651,12 @@ func (s *DescribeSnapshotScheduleOutput) SetStartAt(v int64) *DescribeSnapshotSc return s } +// SetTags sets the Tags field's value. +func (s *DescribeSnapshotScheduleOutput) SetTags(v []*Tag) *DescribeSnapshotScheduleOutput { + s.Tags = v + return s +} + // SetTimezone sets the Timezone field's value. 
func (s *DescribeSnapshotScheduleOutput) SetTimezone(v string) *DescribeSnapshotScheduleOutput { s.Timezone = &v @@ -12885,8 +12932,8 @@ type JoinDomainInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // The organizational unit (OU) is a container with an Active Directory that - // can hold users, groups, computers, and other OUs and this parameter specifies + // The organizational unit (OU) is a container in an Active Directory that can + // hold users, groups, computers, and other OUs and this parameter specifies // the OU that the gateway will join within the AD domain. OrganizationalUnit *string `min:"1" type:"string"` @@ -16482,9 +16529,9 @@ func (s *UpdateNFSFileShareOutput) SetFileShareARN(v string) *UpdateNFSFileShare type UpdateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users or groups in the Active Directory that have administrator - // rights to the file share. A group must be prefixed with the @ character. - // For example @group1. Can only be set if Authentication is set to ActiveDirectory. + // A list of users in the Active Directory that have administrator rights to + // the file share. A group must be prefixed with the @ character. For example + // @group1. Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the @@ -16686,14 +16733,18 @@ type UpdateSMBSecurityStrategyInput struct { // Specifies the type of security strategy. // - // ClientSpecified: SMBv1 is enabled, SMB signing is offered but not required, - // SMB encryption is offered but not required. + // ClientSpecified: if you use this option, requests are established based on + // what is negotiated by the client. This option is recommended when you want + // to maximize compatibility across different clients in your environment. 
// - // MandatorySigning: SMBv1 is disabled, SMB signing is required, SMB encryption - // is offered but not required. + // MandatorySigning: if you use this option, file gateway only allows connections + // from SMBv2 or SMBv3 clients that have signing enabled. This option works + // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. // - // MandatoryEncryption: SMBv1 is disabled, SMB signing is offered but not required, - // SMB encryption is required. + // MandatoryEncryption: if you use this option, file gateway only allows connections + // from SMBv3 clients that have encryption enabled. This option is highly recommended + // for environments that handle sensitive data. This option works with SMB clients + // on Microsoft Windows 8, Windows Server 2012 or newer. // // SMBSecurityStrategy is a required field SMBSecurityStrategy *string `type:"string" required:"true" enum:"SMBSecurityStrategy"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go index 57d6579b756..575135be927 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go @@ -58,11 +58,9 @@ func (c *Transfer) CreateServerRequest(input *CreateServerInput) (req *request.R // CreateServer API operation for AWS Transfer for SFTP. 
// // Instantiates an autoscaling virtual server based on Secure File Transfer -// Protocol (SFTP) in AWS. The call returns the ServerId property assigned by -// the service to the newly created server. Reference this ServerId property -// when you make updates to your server, or work with users. -// -// The response returns the ServerId value for the newly created server. +// Protocol (SFTP) in AWS. When you make updates to your server or when you +// work with users, use the service-generated ServerId property that is assigned +// to the newly created server. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -151,15 +149,13 @@ func (c *Transfer) CreateUserRequest(input *CreateUserInput) (req *request.Reque // CreateUser API operation for AWS Transfer for SFTP. // -// Adds a user and associate them with an existing Secure File Transfer Protocol -// (SFTP) server. Using parameters for CreateUser, you can specify the user -// name, set the home directory, store the user's public key, and assign the -// user's AWS Identity and Access Management (IAM) role. You can also optionally -// add a scope-down policy, and assign metadata with tags that can be used to -// group and search for users. -// -// The response returns the UserName and ServerId values of the new user for -// that server. +// Creates a user and associates them with an existing Secure File Transfer +// Protocol (SFTP) server. You can only create and associate users with SFTP +// servers that have the IdentityProviderType set to SERVICE_MANAGED. Using +// parameters for CreateUser, you can specify the user name, set the home directory, +// store the user's public key, and assign the user's AWS Identity and Access +// Management (IAM) role. You can also optionally add a scope-down policy, and +// assign metadata with tags that can be used to group and search for users. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -254,10 +250,8 @@ func (c *Transfer) DeleteServerRequest(input *DeleteServerInput) (req *request.R // DeleteServer API operation for AWS Transfer for SFTP. // // Deletes the Secure File Transfer Protocol (SFTP) server that you specify. -// If you used SERVICE_MANAGED as your IdentityProviderType, you need to delete -// all users associated with this server before deleting the server itself // -// No response returns from this call. +// No response returns from this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -350,7 +344,7 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // // Deletes a user's Secure Shell (SSH) public key. // -// No response is returned from this call. +// No response is returned from this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -443,7 +437,7 @@ func (c *Transfer) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // // Deletes the user belonging to the server you specify. // -// No response returns from this call. +// No response returns from this operation. // // When you delete a user from a server, the user's information is lost. // @@ -1365,7 +1359,7 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // Stopping a server will not reduce or impact your Secure File Transfer Protocol // (SFTP) endpoint billing. 
// -// The states of STOPPING indicates that the server is in an intermediate state, +// The state of STOPPING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully offline. The values of STOP_FAILED // can indicate an error condition. // @@ -1551,7 +1545,7 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) // // If the IdentityProviderType of the server is API_Gateway, tests whether your // API Gateway is set up successfully. We highly recommend that you call this -// method to test your authentication method as soon as you create your server. +// operation to test your authentication method as soon as you create your server. // By doing so, you can troubleshoot issues with the API Gateway integration // to ensure that your users can successfully use the service. // @@ -1880,33 +1874,38 @@ type CreateServerInput struct { _ struct{} `type:"structure"` // The virtual private cloud (VPC) endpoint settings that you want to configure - // for your SFTP server. + // for your SFTP server. This parameter is required when you specify a value + // for the EndpointType parameter. EndpointDetails *EndpointDetails `type:"structure"` - // The type of VPC endpoint that you want your SFTP server connect to. If you - // connect to a VPC endpoint, your SFTP server isn't accessible over the public - // internet. + // The type of VPC endpoint that you want your SFTP server to connect to. If + // you connect to a VPC endpoint, your SFTP server isn't accessible over the + // public internet. EndpointType *string `type:"string" enum:"EndpointType"` - // The RSA private key as generated by ssh-keygen -N "" -f my-new-server-key + // The RSA private key as generated by the ssh-keygen -N "" -f my-new-server-key // command. // // If you aren't planning to migrate existing users from an existing SFTP server // to a new AWS SFTP server, don't update the host key. 
Accidentally changing - // a server's host key can be disruptive. For more information, see change-host-key + // a server's host key can be disruptive. + // + // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/change-host-key" // in the AWS SFTP User Guide. - HostKey *string `type:"string"` + HostKey *string `type:"string" sensitive:"true"` - // An array containing all of the information required to call a customer-supplied - // authentication API. This parameter is not required when the IdentityProviderType - // value of server that is created uses the SERVICE_MANAGED authentication method. + // This parameter is required when the IdentityProviderType is set to API_GATEWAY. + // Accepts an array containing all of the information required to call a customer-supplied + // authentication API, including the API Gateway URL. This property is not required + // when the IdentityProviderType is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // The mode of authentication enabled for this service. The default value is - // SERVICE_MANAGED, which allows you to store and access SFTP user credentials - // within the service. An IdentityProviderType value of API_GATEWAY indicates - // that user authentication requires a call to an API Gateway endpoint URL provided - // by you to integrate an identity provider of your choice. + // Specifies the mode of authentication for the SFTP server. The default value + // is SERVICE_MANAGED, which allows you to store and access SFTP user credentials + // within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate + // with an identity provider of your choosing. The API_GATEWAY setting requires + // you to provide an API Gateway endpoint URL to call for authentication using + // the IdentityProviderDetails parameter. 
IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // A value that allows the service to write your SFTP users' activity to your @@ -2026,8 +2025,18 @@ type CreateUserInput struct { // A scope-down policy for your user so you can use the same IAM role across // multiple users. This policy scopes down user access to portions of their - // Amazon S3 bucket. Variables you can use inside this policy include ${Transfer:UserName}, + // Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, // ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. + // + // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the + // policy as a JSON blob and pass it in the Policy argument. + // + // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating + // a Scope-Down Policy. + // + // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // in the AWS Security Token Service API Reference. Policy *string `type:"string"` // The IAM role that controls your user's access to your Amazon S3 bucket. The @@ -2046,7 +2055,7 @@ type CreateUserInput struct { // ServerId is a required field ServerId *string `type:"string" required:"true"` - // The public portion of the Secure Shall (SSH) key used to authenticate the + // The public portion of the Secure Shell (SSH) key used to authenticate the // user to the SFTP server. SshPublicKeyBody *string `type:"string"` @@ -2456,8 +2465,8 @@ type DescribeUserInput struct { ServerId *string `type:"string" required:"true"` // The name of the user assigned to one or more servers. User names are part - // of the sign-in credentials to use the AWS Transfer service and perform file - // transfer tasks. 
+ // of the sign-in credentials to use the AWS Transfer for SFTP service and perform + // file transfer tasks. // // UserName is a required field UserName *string `type:"string" required:"true"` @@ -2539,9 +2548,10 @@ func (s *DescribeUserOutput) SetUser(v *DescribedUser) *DescribeUserOutput { return s } -// Describe the properties of the server that was specified. Information returned -// includes: the server Amazon Resource Name (ARN), the authentication configuration -// and type, the logging role, server Id and state, and assigned tags or metadata. +// Describes the properties of the server that was specified. Information returned +// includes the following: the server Amazon Resource Name (ARN), the authentication +// configuration and type, the logging role, the server ID and state, and assigned +// tags or metadata. type DescribedServer struct { _ struct{} `type:"structure"` @@ -2559,9 +2569,9 @@ type DescribedServer struct { // the public internet. EndpointType *string `type:"string" enum:"EndpointType"` - // This value contains the Message-Digest Algorithm (MD5) hash of the server's - // host key. This value is equivalent to the output of ssh-keygen -l -E md5 - // -f my-new-server-key command. + // This value contains the message-digest algorithm (MD5) hash of the server's + // host key. This value is equivalent to the output of the ssh-keygen -l -E + // md5 -f my-new-server-key command. HostKeyFingerprint *string `type:"string"` // Specifies information to call a customer-supplied authentication API. This @@ -2569,7 +2579,7 @@ type DescribedServer struct { IdentityProviderDetails *IdentityProviderDetails `type:"structure"` // This property defines the mode of authentication method enabled for this - // service. A value of SERVICE_MANAGED, means that you are using this Server + // service. A value of SERVICE_MANAGED means that you are using this server // to store and access SFTP user credentials within the service. 
A value of // API_GATEWAY indicates that you have integrated an API Gateway endpoint that // will be invoked for authenticating your user into the service. @@ -2577,10 +2587,10 @@ type DescribedServer struct { // This property is an AWS Identity and Access Management (IAM) entity that // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. - // When set, user activity can be view in your CloudWatch logs. + // When set, user activity can be viewed in your CloudWatch logs. LoggingRole *string `type:"string"` - // This property is a unique system assigned identifier for the SFTP server + // This property is a unique system-assigned identifier for the SFTP server // that you instantiate. ServerId *string `type:"string"` @@ -2589,7 +2599,7 @@ type DescribedServer struct { // State value of OFFLINE means that the server cannot perform file transfer // operations. // - // The states of STARTING and STOPPING indicated that the server is in an intermediate + // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` @@ -2679,7 +2689,7 @@ func (s *DescribedServer) SetUserCount(v int64) *DescribedServer { return s } -// Returns properties of the user that you wish to describe. +// Returns properties of the user that you want to describe. type DescribedUser struct { _ struct{} `type:"structure"` @@ -2689,9 +2699,9 @@ type DescribedUser struct { // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This property specifies the landing directory (or folder) which is the location + // This property specifies the landing directory (or folder), which is the location // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example would be: /bucket_name/home/username . + // user. 
An example is /bucket_name/home/username . HomeDirectory *string `type:"string"` // Specifies the name of the policy in use for the described user. @@ -2797,16 +2807,16 @@ func (s *EndpointDetails) SetVpcEndpointId(v string) *EndpointDetails { } // Returns information related to the type of user authentication that is in -// use for a server's users. A server can only have one method of authentication. +// use for a server's users. A server can have only one method of authentication. type IdentityProviderDetails struct { _ struct{} `type:"structure"` - // The Role parameter provides the type of InvocationRole used to authenticate - // the user account. + // The InvocationRole parameter provides the type of InvocationRole used to + // authenticate the user account. InvocationRole *string `type:"string"` - // The IdentityProviderDetail parameter contains the location of the service - // endpoint used to authenticate users. + // The Url parameter provides contains the location of the service endpoint + // used to authenticate users. Url *string `type:"string"` } @@ -2898,9 +2908,9 @@ func (s *ImportSshPublicKeyInput) SetUserName(v string) *ImportSshPublicKeyInput return s } -// This response identifies the user, server they belong to, and the identifier +// This response identifies the user, the server they belong to, and the identifier // of the SSH public key associated with that user. A user can have more than -// one key on each server that they are associate with. +// one key on each server that they are associated with. type ImportSshPublicKeyOutput struct { _ struct{} `type:"structure"` @@ -3050,9 +3060,9 @@ type ListTagsForResourceInput struct { // request. MaxResults *int64 `min:"1" type:"integer"` - // When you request additional results from the ListTagsForResource call, a - // NextToken parameter is returned in the input. You can then pass in a subsequent - // command the NextToken parameter to continue listing additional tags. 
+ // When you request additional results from the ListTagsForResource operation, + // a NextToken parameter is returned in the input. You can then pass in a subsequent + // command to the NextToken parameter to continue listing additional tags. NextToken *string `min:"1" type:"string"` } @@ -3114,12 +3124,11 @@ type ListTagsForResourceOutput struct { // When you can get additional results from the ListTagsForResource call, a // NextToken parameter is returned in the output. You can then pass in a subsequent - // command the NextToken parameter to continue listing additional tags. + // command to the NextToken parameter to continue listing additional tags. NextToken *string `min:"1" type:"string"` // Key-value pairs that are assigned to a resource, usually for the purpose - // of grouping and searching for items. Tags are metadata that you define that - // you can use for any purpose. + // of grouping and searching for items. Tags are metadata that you define. Tags []*Tag `min:"1" type:"list"` } @@ -3159,11 +3168,11 @@ type ListUsersInput struct { // When you can get additional results from the ListUsers call, a NextToken // parameter is returned in the output. You can then pass in a subsequent command - // the NextToken parameter to continue listing additional users. + // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) - // server that has users are assigned to it. + // server that has users assigned to it. // // ServerId is a required field ServerId *string `type:"string" required:"true"` @@ -3221,7 +3230,7 @@ type ListUsersOutput struct { // When you can get additional results from the ListUsers call, a NextToken // parameter is returned in the output. You can then pass in a subsequent command - // the NextToken parameter to continue listing additional users. 
+ // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` // A system-assigned unique identifier for an SFTP server that the users are @@ -3280,9 +3289,9 @@ type ListedServer struct { EndpointType *string `type:"string" enum:"EndpointType"` // The authentication method used to validate a user for the server that was - // specified. listed. This can include Secure Shell (SSH), user name and password - // combinations, or your own custom authentication method. Valid values include - // SERVICE_MANAGED or API_GATEWAY. + // specified. This can include Secure Shell (SSH), user name and password combinations, + // or your own custom authentication method. Valid values include SERVICE_MANAGED + // or API_GATEWAY. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` // The AWS Identity and Access Management entity that allows the server to turn @@ -3298,7 +3307,7 @@ type ListedServer struct { // and transfer files. A State value of OFFLINE means that the server cannot // perform file transfer operations. // - // The states of STARTING and STOPPING indicated that the server is in an intermediate + // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` @@ -3365,7 +3374,7 @@ type ListedUser struct { _ struct{} `type:"structure"` // This property is the unique Amazon Resource Name (ARN) for the user that - // you wish to learn about. + // you want to learn about. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -3375,7 +3384,7 @@ type ListedUser struct { HomeDirectory *string `type:"string"` // The role in use by this user. 
A role is an AWS Identity and Access Management - // (IAM) entity that in this case allows the SFTP server to act on a user's + // (IAM) entity that, in this case, allows the SFTP server to act on a user's // behalf. It allows the server to inherit the trust relationship that enables // that user to perform file operations to their Amazon S3 bucket. Role *string `type:"string"` @@ -3731,19 +3740,19 @@ func (s TagResourceOutput) GoString() string { type TestIdentityProviderInput struct { _ struct{} `type:"structure"` - // A system assigned identifier for a specific server. That server's user authentication + // A system-assigned identifier for a specific server. That server's user authentication // method is tested with a user name and password. // // ServerId is a required field ServerId *string `type:"string" required:"true"` - // This request parameter is name of the user account to be tested. + // This request parameter is the name of the user account to be tested. // // UserName is a required field UserName *string `type:"string" required:"true"` // The password of the user account to be tested. - UserPassword *string `type:"string"` + UserPassword *string `type:"string" sensitive:"true"` } // String returns the string representation @@ -3793,9 +3802,12 @@ func (s *TestIdentityProviderInput) SetUserPassword(v string) *TestIdentityProvi type TestIdentityProviderOutput struct { _ struct{} `type:"structure"` - // The result of the authorization test as a message. + // A message that indicates whether the test was successful or not. Message *string `type:"string"` + // The response that is returned from your API Gateway. + Response *string `type:"string"` + // The HTTP status code that is the response from your API Gateway. // // StatusCode is a required field @@ -3823,6 +3835,12 @@ func (s *TestIdentityProviderOutput) SetMessage(v string) *TestIdentityProviderO return s } +// SetResponse sets the Response field's value. 
+func (s *TestIdentityProviderOutput) SetResponse(v string) *TestIdentityProviderOutput { + s.Response = &v + return s +} + // SetStatusCode sets the StatusCode field's value. func (s *TestIdentityProviderOutput) SetStatusCode(v int64) *TestIdentityProviderOutput { s.StatusCode = &v @@ -3929,9 +3947,11 @@ type UpdateServerInput struct { // // If you aren't planning to migrate existing users from an existing SFTP server // to a new AWS SFTP server, don't update the host key. Accidentally changing - // a server's host key can be disruptive. For more information, see change-host-key + // a server's host key can be disruptive. + // + // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" // in the AWS SFTP User Guide. - HostKey *string `type:"string"` + HostKey *string `type:"string" sensitive:"true"` // This response parameter is an array containing all of the information required // to call a customer's authentication API method. @@ -4037,16 +4057,25 @@ func (s *UpdateServerOutput) SetServerId(v string) *UpdateServerOutput { type UpdateUserInput struct { _ struct{} `type:"structure"` - // The HomeDirectory parameter specifies the landing directory (folder) for - // a user when they log in to the server using their client. An example would - // be: /home/username . + // A parameter that specifies the landing directory (folder) for a user when + // they log in to the server using their client. An example is /home/username . HomeDirectory *string `type:"string"` // Allows you to supply a scope-down policy for your user so you can use the // same AWS Identity and Access Management (IAM) role across multiple users. - // The policy scopes down users access to portions of your Amazon S3 bucket. + // The policy scopes down user access to portions of your Amazon S3 bucket. // Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, // and ${Transfer:HomeBucket}. 
+ // + // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the + // policy as a JSON blob and pass it in the Policy argument. + // + // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating + // a Scope-Down Policy. + // + // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // in the AWS Security Token Service API Reference. Policy *string `type:"string"` // The IAM role that controls your user's access to your Amazon S3 bucket. The @@ -4181,7 +4210,7 @@ const ( // use for a server's users. For SERVICE_MANAGED authentication, the Secure // Shell (SSH) public keys are stored with a user on an SFTP server instance. // For API_GATEWAY authentication, your custom authentication method is implemented -// by using an API call. A server can only have one method of authentication. +// by using an API call. A server can have only one method of authentication. const ( // IdentityProviderTypeServiceManaged is a IdentityProviderType enum value IdentityProviderTypeServiceManaged = "SERVICE_MANAGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go index dde22377781..51a9c01b3df 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go @@ -12,7 +12,7 @@ // and partners, or their applications. With your data in S3, you can use it // with AWS services for processing, analytics, machine learning, and archiving. // Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no -// infrastructure to buy and setup. +// infrastructure to buy and set up. // // See https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05 for more information on this service. 
// diff --git a/vendor/modules.txt b/vendor/modules.txt index bc94ebe4e59..4d20bb13581 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.23.0 +# github.com/aws/aws-sdk-go v1.23.8 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr diff --git a/website/docs/r/appmesh_route.html.markdown b/website/docs/r/appmesh_route.html.markdown index f011866f53b..eee4de43a75 100644 --- a/website/docs/r/appmesh_route.html.markdown +++ b/website/docs/r/appmesh_route.html.markdown @@ -42,6 +42,41 @@ resource "aws_appmesh_route" "serviceb" { } ``` +### HTTP Header Routing + +```hcl +resource "aws_appmesh_route" "serviceb" { + name = "serviceB-route" + mesh_name = "${aws_appmesh_mesh.simple.id}" + virtual_router_name = "${aws_appmesh_virtual_router.serviceb.name}" + + spec { + http_route { + match { + method = "POST" + prefix = "/" + scheme = "https" + + header { + name = "clientRequestId" + + match { + prefix = "123" + } + } + } + + action { + weighted_target { + virtual_node = "${aws_appmesh_virtual_node.serviceb.name}" + weight = 100 + } + } + } + } +} +``` + ### TCP Routing ```hcl @@ -76,6 +111,8 @@ The following arguments are supported: The `spec` object supports the following: * `http_route` - (Optional) The HTTP routing information for the route. +* `priority` - (Optional) The priority for the route, between `0` and `1000`. +Routes are matched based on the specified value, where `0` is the highest priority. * `tcp_route` - (Optional) The TCP routing information for the route. The `http_route` object supports the following: @@ -92,16 +129,38 @@ The `action` object supports the following: * `weighted_target` - (Required) The targets that traffic is routed to when a request matches the route. 
You can specify one or more targets and their relative weights with which to distribute traffic. -The `match` object supports the following: +The `http_route`'s `match` object supports the following: * `prefix` - (Required) Specifies the path with which to match requests. This parameter must always start with /, which by itself matches all requests to the virtual router service name. +* `header` - (Optional) The client request headers to match on. +* `method` - (Optional) The client request header method to match on. Valid values: `GET`, `HEAD`, `POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`. +* `scheme` - (Optional) The client request header scheme to match on. Valid values: `http`, `https`. The `weighted_target` object supports the following: * `virtual_node` - (Required) The virtual node to associate with the weighted target. * `weight` - (Required) The relative weight of the weighted target. An integer between 0 and 100. +The `header` object supports the following: + +* `name` - (Required) A name for the HTTP header in the client request that will be matched on. +* `invert` - (Optional) If `true`, the match is on the opposite of the `match` method and value. Default is `false`. +* `match` - (Optional) The method and value to match the header value sent with a request. Specify one match method. + +The `header`'s `match` object supports the following: + +* `exact` - (Optional) The header value sent by the client must match the specified value exactly. +* `prefix` - (Optional) The header value sent by the client must begin with the specified characters. +* `range`- (Optional) The object that specifies the range of numbers that the header value sent by the client must be included in. +* `regex` - (Optional) The header value sent by the client must include the specified characters. +* `suffix` - (Optional) The header value sent by the client must end with the specified characters. 
+
+The `range` object supports the following:
+
+* `end` - (Required) The end of the range.
+* `start` - (Required) The start of the range.
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported: